Follow-up to the proposal to move the MIR physical register namespace to the '$' sigil.

Discussed here:

http://lists.llvm.org/pipermail/llvm-dev/2018-January/120320.html

In preparation for adding support for named vregs, we are changing the sigil for
physical registers in MIR from '%' to '$'. This prevents name clashes between
named physical registers and named vregs.
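
For example (taken directly from the tests updated below), a COPY between a
physical register and a vreg changes like this; virtual registers keep the
'%' sigil:

    ; Before: physical registers use '%', same as virtual registers.
    %0:vgpr(p1) = COPY %vgpr0_vgpr1
    %vgpr0 = COPY %1

    ; After: physical registers use '$'; vregs are unchanged.
    %0:vgpr(p1) = COPY $vgpr0_vgpr1
    $vgpr0 = COPY %1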

llvm-svn: 323922
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
index 9b53b02..a976738 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-flat.mir
@@ -13,16 +13,16 @@
 regBankSelected: true
 
 # GCN: global_addrspace
-# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY %vgpr0_vgpr1
+# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
 # GCN: FLAT_LOAD_DWORD  [[PTR]], 0, 0, 0
 
 body: |
   bb.0:
-    liveins:  %vgpr0_vgpr1
+    liveins:  $vgpr0_vgpr1
 
-    %0:vgpr(p1) = COPY %vgpr0_vgpr1
+    %0:vgpr(p1) = COPY $vgpr0_vgpr1
     %1:vgpr(s32) = G_LOAD %0 :: (load 4 from %ir.global0)
-    %vgpr0 = COPY %1
+    $vgpr0 = COPY %1
 
 ...
 ---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
index 70e2b5e..4f4655e 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-load-smrd.mir
@@ -14,7 +14,7 @@
 regBankSelected: true
 
 # GCN: body:
-# GCN: [[PTR:%[0-9]+]]:sreg_64 = COPY %sgpr0_sgpr1
+# GCN: [[PTR:%[0-9]+]]:sreg_64 = COPY $sgpr0_sgpr1
 
 # Immediate offset:
 # SICI: S_LOAD_DWORD_IMM [[PTR]], 1, 0
@@ -89,54 +89,54 @@
 
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0:sgpr(p2) = COPY %sgpr0_sgpr1
+    %0:sgpr(p2) = COPY $sgpr0_sgpr1
 
     %1:sgpr(s64) = G_CONSTANT i64 4
     %2:sgpr(p2) = G_GEP %0, %1
     %3:sgpr(s32) = G_LOAD %2 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %3
+    $sgpr0 = COPY %3
 
     %4:sgpr(s64) = G_CONSTANT i64 1020
     %5:sgpr(p2) = G_GEP %0, %4
     %6:sgpr(s32) = G_LOAD %5 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %6
+    $sgpr0 = COPY %6
 
     %7:sgpr(s64) = G_CONSTANT i64 1024
     %8:sgpr(p2) = G_GEP %0, %7
     %9:sgpr(s32) = G_LOAD %8 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %9
+    $sgpr0 = COPY %9
 
     %10:sgpr(s64) = G_CONSTANT i64 1048572
     %11:sgpr(p2) = G_GEP %0, %10
     %12:sgpr(s32) = G_LOAD %11 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %12
+    $sgpr0 = COPY %12
 
     %13:sgpr(s64) = G_CONSTANT i64 1048576
     %14:sgpr(p2) = G_GEP %0, %13
     %15:sgpr(s32) = G_LOAD %14 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %15
+    $sgpr0 = COPY %15
 
     %16:sgpr(s64) = G_CONSTANT i64 17179869180
     %17:sgpr(p2) = G_GEP %0, %16
     %18:sgpr(s32) = G_LOAD %17 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %18
+    $sgpr0 = COPY %18
 
     %19:sgpr(s64) = G_CONSTANT i64 17179869184
     %20:sgpr(p2) = G_GEP %0, %19
     %21:sgpr(s32) = G_LOAD %20 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %21
+    $sgpr0 = COPY %21
 
     %22:sgpr(s64) = G_CONSTANT i64 4294967292
     %23:sgpr(p2) = G_GEP %0, %22
     %24:sgpr(s32) = G_LOAD %23 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %24
+    $sgpr0 = COPY %24
 
     %25:sgpr(s64) = G_CONSTANT i64 4294967296
     %26:sgpr(p2) = G_GEP %0, %25
     %27:sgpr(s32) = G_LOAD %26 :: (load 4 from %ir.const0)
-    %sgpr0 = COPY %27
+    $sgpr0 = COPY %27
 
 ...
 ---
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
index 0b80927..2deef60 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/inst-select-store-flat.mir
@@ -13,16 +13,16 @@
 regBankSelected: true
 
 # GCN: global_addrspace
-# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY %vgpr0_vgpr1
-# GCN: [[VAL:%[0-9]+]]:vgpr_32 = COPY %vgpr2
+# GCN: [[PTR:%[0-9]+]]:vreg_64 = COPY $vgpr0_vgpr1
+# GCN: [[VAL:%[0-9]+]]:vgpr_32 = COPY $vgpr2
 # GCN: FLAT_STORE_DWORD [[PTR]], [[VAL]], 0, 0, 0
 
 body: |
   bb.0:
-    liveins:  %vgpr0_vgpr1, %vgpr2
+    liveins:  $vgpr0_vgpr1, $vgpr2
 
-    %0:vgpr(p1) = COPY %vgpr0_vgpr1
-    %1:vgpr(s32) = COPY %vgpr2
+    %0:vgpr(p1) = COPY $vgpr0_vgpr1
+    %1:vgpr(s32) = COPY $vgpr2
     G_STORE %1, %0 :: (store 4 into %ir.global0)
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll
index ebcdac3..e59e3f3 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/irtranslator-amdgpu_vs.ll
@@ -2,7 +2,7 @@
 
 
 ; CHECK-LABEL: name: test_f32_inreg
-; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0
+; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[S0]]
 define amdgpu_vs void @test_f32_inreg(float inreg %arg0) {
   call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0
@@ -10,7 +10,7 @@
 }
 
 ; CHECK-LABEL: name: test_f32
-; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY %vgpr0
+; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY $vgpr0
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[V0]]
 define amdgpu_vs void @test_f32(float %arg0) {
   call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg0, float undef, float undef, float undef, i1 false, i1 false) #0
@@ -18,7 +18,7 @@
 }
 
 ; CHECK-LABEL: name: test_ptr2_byval
-; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY %sgpr0_sgpr1
+; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY $sgpr0_sgpr1
 ; CHECK: G_LOAD [[S01]]
 define amdgpu_vs void @test_ptr2_byval(i32 addrspace(2)* byval %arg0) {
    %tmp0 = load volatile i32, i32 addrspace(2)* %arg0
@@ -26,7 +26,7 @@
 }
 
 ; CHECK-LABEL: name: test_ptr2_inreg
-; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY %sgpr0_sgpr1
+; CHECK: [[S01:%[0-9]+]]:_(p2) = COPY $sgpr0_sgpr1
 ; CHECK: G_LOAD [[S01]]
 define amdgpu_vs void @test_ptr2_inreg(i32 addrspace(2)* inreg %arg0) {
   %tmp0 = load volatile i32, i32 addrspace(2)* %arg0
@@ -34,8 +34,8 @@
 }
 
 ; CHECK-LABEL: name: test_sgpr_alignment0
-; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0
-; CHECK: [[S23:%[0-9]+]]:_(p2) = COPY %sgpr2_sgpr3
+; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0
+; CHECK: [[S23:%[0-9]+]]:_(p2) = COPY $sgpr2_sgpr3
 ; CHECK: G_LOAD [[S23]]
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[S0]]
 define amdgpu_vs void @test_sgpr_alignment0(float inreg %arg0, i32 addrspace(2)* inreg %arg1) {
@@ -45,10 +45,10 @@
 }
 
 ; CHECK-LABEL: name: test_order
-; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY %sgpr0
-; CHECK: [[S1:%[0-9]+]]:_(s32) = COPY %sgpr1
-; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY %vgpr0
-; CHECK: [[V1:%[0-9]+]]:_(s32) = COPY %vgpr1
+; CHECK: [[S0:%[0-9]+]]:_(s32) = COPY $sgpr0
+; CHECK: [[S1:%[0-9]+]]:_(s32) = COPY $sgpr1
+; CHECK: [[V0:%[0-9]+]]:_(s32) = COPY $vgpr0
+; CHECK: [[V1:%[0-9]+]]:_(s32) = COPY $vgpr1
 ; CHECK: G_INTRINSIC_W_SIDE_EFFECTS intrinsic(@llvm.amdgcn.exp), %{{[0-9]+}}(s32), %{{[0-9]+}}(s32), [[V0]](s32), [[S0]](s32), [[V1]](s32), [[S1]](s32)
 define amdgpu_vs void @test_order(float inreg %arg0, float inreg %arg1, float %arg2, float %arg3) {
   call void @llvm.amdgcn.exp.f32(i32 32, i32 15, float %arg2, float %arg0, float %arg3, float %arg1, i1 false, i1 false) #0
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir
index 3afb2c7..01940eb 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-add.mir
@@ -13,14 +13,14 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_add
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[ADD:%[0-9]+]]:_(s32) = G_ADD [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_ADD %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
index c22355c..876c491 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir
@@ -13,14 +13,14 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_and
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[AND:%[0-9]+]]:_(s32) = G_AND [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_AND %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir
index 1f0fbd0..0825bf1 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-bitcast.mir
@@ -13,14 +13,14 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
     ; CHECK-LABEL: name: test_bitcast
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[BITCAST:%[0-9]+]]:_(<2 x s16>) = G_BITCAST [[COPY]](s32)
     ; CHECK: [[BITCAST1:%[0-9]+]]:_(s32) = G_BITCAST [[BITCAST]](<2 x s16>)
-    %0(s32) = COPY %vgpr0
+    %0(s32) = COPY $vgpr0
     %1(<2 x s16>) = G_BITCAST %0
     %2(s32) = G_BITCAST %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir
index 843c03f..860be07 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-constant.mir
@@ -47,7 +47,7 @@
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_FCONSTANT float 1.000000e+00
     ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_FCONSTANT float 7.500000e+00
     %0(s32) = G_FCONSTANT float 1.0
-    %vgpr0 = COPY %0
+    $vgpr0 = COPY %0
     %1(s32) = G_FCONSTANT float 7.5
-    %vgpr0 = COPY %1
+    $vgpr0 = COPY %1
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir
index 0db061e..a6e77a5 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fadd.mir
@@ -16,12 +16,12 @@
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
     ; CHECK-LABEL: name: test_fadd
     ; CHECK: %2:_(s32) = G_FADD %0, %1
 
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_FADD %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir
index 98bbd6a..49e9a61 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-fmul.mir
@@ -13,14 +13,14 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_fmul
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[FMUL:%[0-9]+]]:_(s32) = G_FMUL [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_FMUL %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir
index bb6b68a..ae2e4a2 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir
@@ -16,16 +16,16 @@
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %vgpr0
+    liveins: $vgpr0
     ; CHECK-LABEL: name: test_icmp
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]]
     ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C]], [[COPY]]
-    ; CHECK: %vgpr0 = COPY [[SELECT]](s32)
+    ; CHECK: $vgpr0 = COPY [[SELECT]](s32)
     %0(s32) = G_CONSTANT i32 0
-    %1(s32) = COPY %vgpr0
+    %1(s32) = COPY $vgpr0
     %2(s1) = G_ICMP intpred(ne), %0, %1
     %3:_(s32) = G_SELECT %2(s1), %0(s32), %1(s32)
-    %vgpr0 = COPY %3
+    $vgpr0 = COPY %3
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir
index 8780a1e..1d92673 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir
@@ -12,14 +12,14 @@
   - { id: 2, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_or
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[OR:%[0-9]+]]:_(s32) = G_OR [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_OR %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir
index af63378..95612a8 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir
@@ -16,21 +16,21 @@
   - { id: 5, class: _ }
 body: |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
     ; CHECK-LABEL: name: test_select
     ; CHECK: [[C:%[0-9]+]]:_(s32) = G_CONSTANT i32 0
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
     ; CHECK: [[ICMP:%[0-9]+]]:_(s1) = G_ICMP intpred(ne), [[C]](s32), [[COPY]]
     ; CHECK: [[C1:%[0-9]+]]:_(s32) = G_CONSTANT i32 1
     ; CHECK: [[C2:%[0-9]+]]:_(s32) = G_CONSTANT i32 2
     ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[C1]], [[C2]]
     %0(s32) = G_CONSTANT i32 0
-    %1(s32) = COPY %vgpr0
+    %1(s32) = COPY $vgpr0
 
     %2(s1) = G_ICMP intpred(ne), %0, %1
     %3(s32) = G_CONSTANT i32 1
     %4(s32) = G_CONSTANT i32 2
     %5(s32) = G_SELECT %2, %3, %4
-    %vgpr0 = COPY %5
+    $vgpr0 = COPY %5
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
index d6320fa..0dfec34 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/legalize-shl.mir
@@ -9,14 +9,14 @@
   - { id: 2, class: _ }
 body: |
   bb.0.entry:
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
     ; CHECK-LABEL: name: test_shl
-    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY %vgpr0
-    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY %vgpr1
+    ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY $vgpr0
+    ; CHECK: [[COPY1:%[0-9]+]]:_(s32) = COPY $vgpr1
     ; CHECK: [[SHL:%[0-9]+]]:_(s32) = G_SHL [[COPY]], [[COPY1]]
-    %0(s32) = COPY %vgpr0
-    %1(s32) = COPY %vgpr1
+    %0(s32) = COPY $vgpr0
+    %1(s32) = COPY $vgpr1
     %2(s32) = G_SHL %0, %1
-    %vgpr0 = COPY %2
+    $vgpr0 = COPY %2
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
index 902f1e6..4467630 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/regbankselect.mir
@@ -29,8 +29,8 @@
 
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1
-    %0:_(p2) = COPY %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
+    %0:_(p2) = COPY $sgpr0_sgpr1
     %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr0)
 ...
 
@@ -45,8 +45,8 @@
 
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1
-    %0:_(p1) = COPY %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
+    %0:_(p1) = COPY $sgpr0_sgpr1
     %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.ptr1)
 ...
 
@@ -63,7 +63,7 @@
 
 body: |
   bb.0:
-    liveins: %sgpr0_sgpr1
-    %0:_(p1) = COPY %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
+    %0:_(p1) = COPY $sgpr0_sgpr1
     %1:_(s32) = G_LOAD %0 :: (load 4 from %ir.tmp1)
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir b/llvm/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir
index 261a9e4..0ddfa75 100644
--- a/llvm/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir
+++ b/llvm/test/CodeGen/AMDGPU/break-smem-soft-clauses.mir
@@ -8,9 +8,9 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x1
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -20,11 +20,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x2
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr1 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr1 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -34,13 +34,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x3
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr6_sgpr7, 0, 0
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr6_sgpr7, 0, 0
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    %sgpr1 = S_LOAD_DWORD_IMM %sgpr6_sgpr7, 0, 0
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    $sgpr1 = S_LOAD_DWORD_IMM $sgpr6_sgpr7, 0, 0
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     S_ENDPGM
 ...
 ---
@@ -50,15 +50,15 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x4
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    ; GCN-NEXT: %sgpr1 = S_LOAD_DWORD_IMM %sgpr8_sgpr9, 0, 0
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
-    ; GCN-NEXT: %sgpr3 = S_LOAD_DWORD_IMM %sgpr16_sgpr17, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr1 = S_LOAD_DWORD_IMM $sgpr8_sgpr9, 0, 0
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
+    ; GCN-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr16_sgpr17, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
-    %sgpr1 = S_LOAD_DWORD_IMM %sgpr8_sgpr9, 0, 0
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
-    %sgpr3 = S_LOAD_DWORD_IMM %sgpr16_sgpr17, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
+    $sgpr1 = S_LOAD_DWORD_IMM $sgpr8_sgpr9, 0, 0
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
+    $sgpr3 = S_LOAD_DWORD_IMM $sgpr16_sgpr17, 0, 0
     S_ENDPGM
 ...
 ---
@@ -67,11 +67,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_smem_clause_load_smrd4_x2_sameptr
-    ; GCN: %sgpr12 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr12 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr12 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr12 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -81,9 +81,9 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_load4_overwrite_ptr_lo
-    ; GCN: %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -93,9 +93,9 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_load4_overwrite_ptr_hi
-    ; GCN: %sgpr11 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr11 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr11 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr11 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -105,9 +105,9 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_load8_overwrite_ptr
-    ; GCN: %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
     S_ENDPGM
 ...
 ---
@@ -119,47 +119,47 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_at_max_smem_clause_size_smrd_load4
-    ; GCN: %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr14 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr15 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr16 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr17 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr18 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr19 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr20 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr21 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr22 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr23 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr24 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr25 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr26 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr27 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr28 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr0 = S_LOAD_DWORD_IMM %sgpr30_sgpr31, 0, 0
-    ; GCN-NEXT: %sgpr0 = S_MOV_B32 %sgpr0, implicit %sgpr13, implicit %sgpr14, implicit %sgpr15, implicit %sgpr16, implicit %sgpr17, implicit %sgpr18, implicit %sgpr19, implicit %sgpr20, implicit %sgpr21, implicit %sgpr22, implicit %sgpr23, implicit %sgpr24, implicit %sgpr25, implicit %sgpr26, implicit %sgpr27, implicit %sgpr28
+    ; GCN: $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr14 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr15 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr16 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr17 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr18 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr19 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr20 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr21 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr22 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr23 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr24 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr25 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr26 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr27 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr28 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr0 = S_LOAD_DWORD_IMM $sgpr30_sgpr31, 0, 0
+    ; GCN-NEXT: $sgpr0 = S_MOV_B32 $sgpr0, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $sgpr16, implicit $sgpr17, implicit $sgpr18, implicit $sgpr19, implicit $sgpr20, implicit $sgpr21, implicit $sgpr22, implicit $sgpr23, implicit $sgpr24, implicit $sgpr25, implicit $sgpr26, implicit $sgpr27, implicit $sgpr28
     ; GCN-NEXT: S_ENDPGM
-    %sgpr13 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr14 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr15 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr16 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr13 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr14 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr15 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr16 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
 
-    %sgpr17 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr18 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr19 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr20 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr17 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr18 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr19 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr20 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
 
-    %sgpr21 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr22 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr23 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr24 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr21 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr22 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr23 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr24 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
 
-    %sgpr25 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr26 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr27 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr28 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr25 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr26 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr27 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr28 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
 
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr30_sgpr31, 0, 0
-    %sgpr0 = S_MOV_B32 %sgpr0, implicit %sgpr13, implicit %sgpr14, implicit %sgpr15, implicit %sgpr16, implicit %sgpr17, implicit %sgpr18, implicit %sgpr19, implicit %sgpr20, implicit %sgpr21, implicit %sgpr22, implicit %sgpr23, implicit %sgpr24, implicit %sgpr25, implicit %sgpr26, implicit %sgpr27, implicit %sgpr28
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr30_sgpr31, 0, 0
+    $sgpr0 = S_MOV_B32 $sgpr0, implicit $sgpr13, implicit $sgpr14, implicit $sgpr15, implicit $sgpr16, implicit $sgpr17, implicit $sgpr18, implicit $sgpr19, implicit $sgpr20, implicit $sgpr21, implicit $sgpr22, implicit $sgpr23, implicit $sgpr24, implicit $sgpr25, implicit $sgpr26, implicit $sgpr27, implicit $sgpr28
     S_ENDPGM
 ...
 ---
@@ -169,12 +169,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd4_lo_ptr
-    ; GCN: %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr12 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr12 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr10 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr12 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr10 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr12 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -184,11 +184,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd4_hi_ptr
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr3 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr3 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr3 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr3 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -198,12 +198,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd8_ptr
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN-NEXT: $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -213,11 +213,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_simple_load_smrd16_ptr
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM %sgpr6_sgpr7, 0, 0
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM $sgpr6_sgpr7, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM %sgpr6_sgpr7, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr12_sgpr13_sgpr14_sgpr15 = S_LOAD_DWORDX4_IMM $sgpr6_sgpr7, 0, 0
     S_ENDPGM
 ...
 ---
@@ -228,16 +228,16 @@
   ; GCN-LABEL: name: break_smem_clause_block_boundary_load_smrd8_ptr
   ; GCN: bb.0:
   ; GCN:   successors: %bb.1(0x80000000)
-  ; GCN:   %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+  ; GCN:   $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
   ; GCN: bb.1:
   ; XNACK-NEXT:   S_NOP 0
-  ; GCN-NEXT:   %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+  ; GCN-NEXT:   $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
   ; GCN-NEXT:   S_ENDPGM
   bb.0:
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0
 
   bb.1:
-    %sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr10_sgpr11 = S_LOAD_DWORDX2_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -248,11 +248,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_store_load_into_ptr_smrd4
-    ; GCN: S_STORE_DWORD_IMM %sgpr16, %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr12 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    ; GCN: S_STORE_DWORD_IMM $sgpr16, $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr12 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    S_STORE_DWORD_IMM %sgpr16, %sgpr10_sgpr11, 0, 0
-    %sgpr12 = S_LOAD_DWORD_IMM %sgpr14_sgpr15, 0, 0
+    S_STORE_DWORD_IMM $sgpr16, $sgpr10_sgpr11, 0, 0
+    $sgpr12 = S_LOAD_DWORD_IMM $sgpr14_sgpr15, 0, 0
     S_ENDPGM
 ...
 ---
@@ -264,11 +264,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_smem_clause_store_load_into_data_smrd4
-    ; GCN: S_STORE_DWORD_IMM %sgpr8, %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr8 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: S_STORE_DWORD_IMM $sgpr8, $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr8 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    S_STORE_DWORD_IMM %sgpr8, %sgpr10_sgpr11, 0, 0
-    %sgpr8 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    S_STORE_DWORD_IMM $sgpr8, $sgpr10_sgpr11, 0, 0
+    $sgpr8 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -278,13 +278,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: valu_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -294,13 +294,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: salu_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %sgpr8 = S_MOV_B32 0
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $sgpr8 = S_MOV_B32 0
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %sgpr8 = S_MOV_B32 0
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $sgpr8 = S_MOV_B32 0
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -309,13 +309,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: ds_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -325,13 +325,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_inst_breaks_smem_clause
-    ; GCN: %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    ; GCN: $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0 = S_LOAD_DWORD_IMM %sgpr10_sgpr11, 0, 0
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr12_sgpr13, 0, 0
+    $sgpr0 = S_LOAD_DWORD_IMM $sgpr10_sgpr11, 0, 0
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr12_sgpr13, 0, 0
     S_ENDPGM
 ...
 ---
@@ -341,11 +341,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: implicit_use_breaks_smem_clause
-    ; GCN: %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0, implicit %sgpr12_sgpr13
+    ; GCN: $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0, implicit $sgpr12_sgpr13
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM %sgpr6_sgpr7, 0, 0
+    ; GCN-NEXT: $sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM $sgpr6_sgpr7, 0, 0
     ; GCN-NEXT: S_ENDPGM
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM %sgpr10_sgpr11, 0, 0, implicit %sgpr12_sgpr13
-    %sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM %sgpr6_sgpr7, 0, 0
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM $sgpr10_sgpr11, 0, 0, implicit $sgpr12_sgpr13
+    $sgpr12_sgpr13 = S_LOAD_DWORDX2_IMM $sgpr6_sgpr7, 0, 0
     S_ENDPGM
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir b/llvm/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir
index 92145d3..f2a88b6 100644
--- a/llvm/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir
+++ b/llvm/test/CodeGen/AMDGPU/break-vmem-soft-clauses.mir
@@ -7,10 +7,10 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x1
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -20,12 +20,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x2
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -35,14 +35,14 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x3
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr5_vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr5_vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr5_vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr5_vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -52,16 +52,16 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x4
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr8_vgpr9, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr10_vgpr11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr8_vgpr9, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr10_vgpr11, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr8_vgpr9, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr10_vgpr11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr8_vgpr9, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr10_vgpr11, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -71,12 +71,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_flat4_x2_sameptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -86,10 +86,10 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load4_overwrite_ptr_lo
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -99,10 +99,10 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load4_overwrite_ptr_hi
-    ; GCN: %vgpr1 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr1 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -112,10 +112,10 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: flat_load8_overwrite_ptr
-    ; GCN: %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -128,49 +128,49 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_at_max_clause_size_flat_load4
-    ; GCN: %vgpr2 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr4 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr5 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr6 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr7 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr9 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr12 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr13 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr14 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr15 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr16 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr17 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr4 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr5 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr6 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr7 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr9 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr12 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr13 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr14 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr15 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr16 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr17 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr0 = S_MOV_B32 %sgpr0, implicit %vgpr2, implicit %vgpr3, implicit %vgpr4, implicit %vgpr5, implicit %vgpr6, implicit %vgpr7, implicit %vgpr8, implicit %vgpr9, implicit %vgpr10, implicit %vgpr11, implicit %vgpr12, implicit %vgpr13, implicit %vgpr14, implicit %vgpr15, implicit %vgpr16, implicit %vgpr17, implicit %vgpr18
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr0 = S_MOV_B32 $sgpr0, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr4 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr5 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr4 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr5 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %vgpr6 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr7 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr9 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr6 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr7 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr9 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %vgpr10 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr12 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr13 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr12 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr13 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %vgpr14 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr15 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr16 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr17 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr14 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr15 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr16 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr17 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr0 = S_MOV_B32 %sgpr0, implicit %vgpr2, implicit %vgpr3, implicit %vgpr4, implicit %vgpr5, implicit %vgpr6, implicit %vgpr7, implicit %vgpr8, implicit %vgpr9, implicit %vgpr10, implicit %vgpr11, implicit %vgpr12, implicit %vgpr13, implicit %vgpr14, implicit %vgpr15, implicit %vgpr16, implicit %vgpr17, implicit %vgpr18
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr0 = S_MOV_B32 $sgpr0, implicit $vgpr2, implicit $vgpr3, implicit $vgpr4, implicit $vgpr5, implicit $vgpr6, implicit $vgpr7, implicit $vgpr8, implicit $vgpr9, implicit $vgpr10, implicit $vgpr11, implicit $vgpr12, implicit $vgpr13, implicit $vgpr14, implicit $vgpr15, implicit $vgpr16, implicit $vgpr17, implicit $vgpr18
     S_ENDPGM
 ...
 ---
@@ -180,13 +180,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat4_lo_ptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -196,13 +196,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat4_hi_ptr
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr3 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr3 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -212,13 +212,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat8_ptr
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -229,12 +229,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_flat16_ptr
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2_vgpr3_vgpr4_vgpr5 = FLAT_LOAD_DWORDX4 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -249,17 +249,17 @@
   ; GCN-LABEL: name: break_clause_block_boundary_load_flat8_ptr
   ; GCN: bb.0:
   ; GCN-NEXT:   successors: %bb.1(0x80000000)
-  ; GCN:   %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+  ; GCN:   $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
   ; GCN: bb.1:
   ; XNACK-NEXT:  S_NOP 0
-  ; GCN-NEXT:   %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+  ; GCN-NEXT:   $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
   ; GCN-NEXT:   S_ENDPGM
 
   bb.0:
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
 
   bb.1:
-    %vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2_vgpr3 = FLAT_LOAD_DWORDX2 $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -270,12 +270,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_store_load_into_ptr_flat4
-    ; GCN: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -287,12 +287,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_store_load_into_data_flat4
-    ; GCN: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -303,15 +303,15 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: valu_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = V_MOV_B32_e32 0, implicit %exec
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = V_MOV_B32_e32 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = V_MOV_B32_e32 0, implicit %exec
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = V_MOV_B32_e32 0, implicit $exec
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -322,15 +322,15 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: salu_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr8 = S_MOV_B32 0
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr8 = S_MOV_B32 0
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr8 = S_MOV_B32 0
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr8 = S_MOV_B32 0
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -340,15 +340,15 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: ds_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr8 = DS_READ_B32 %vgpr9, 0, 0, implicit %m0, implicit %exec
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr8 = DS_READ_B32 $vgpr9, 0, 0, implicit $m0, implicit $exec
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -358,14 +358,14 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: smrd_inst_breaks_clause
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %sgpr8 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 0, 0
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $sgpr8 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %sgpr8 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 0, 0
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $sgpr8 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 0, 0
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -375,13 +375,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: implicit_use_breaks_clause
-    ; GCN: %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr, implicit %vgpr4_vgpr5
+    ; GCN: $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr4_vgpr5
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr, implicit %vgpr4_vgpr5
-    %vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 %vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0_vgpr1 = FLAT_LOAD_DWORDX2 $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr, implicit $vgpr4_vgpr5
+    $vgpr4_vgpr5 = FLAT_LOAD_DWORDX2 $vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -390,12 +390,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: trivial_clause_load_mubuf4_x2
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr3 = BUFFER_LOAD_DWORD_OFFEN %vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = BUFFER_LOAD_DWORD_OFFEN %vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFEN $vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -404,13 +404,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_simple_load_mubuf_offen_ptr
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -421,13 +421,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: mubuf_load4_overwrite_ptr
-    ; GCN: %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr1 = V_MOV_B32_e32 0, implicit %exec
-    ; GCN-NEXT: %vgpr2 = V_MOV_B32_e32 %vgpr0, implicit %exec
+    ; GCN: $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+    ; GCN-NEXT: $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 0, implicit %exec
-    %vgpr2 = V_MOV_B32_e32 %vgpr0, implicit %exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 0, implicit $exec
+    $vgpr2 = V_MOV_B32_e32 $vgpr0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -438,29 +438,29 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_flat_load_mubuf_load
-    ; GCN: %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 # Break a clause due to interference between mubuf and flat instructions
 
 # GCN-LABEL: name: break_clause_mubuf_load_flat_load
 # GCN: bb.0:
-# GCN-NEXT: %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4
+# GCN-NEXT: $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4
 # XNACK-NEXT: S_NOP 0
-# GCN-NEXT: %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3
+# GCN-NEXT: $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3
 # GCN-NEXT: S_ENDPGM
 name: break_clause_mubuf_load_flat_load
 
 body: |
   bb.0:
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr1 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr1 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 ...
@@ -471,13 +471,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_rtn_into_ptr_flat4
-    ; GCN: %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr4 = FLAT_ATOMIC_ADD_RTN %vgpr5_vgpr6, %vgpr7, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr5_vgpr6, $vgpr7, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr4 = FLAT_ATOMIC_ADD_RTN %vgpr5_vgpr6, %vgpr7, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr4 = FLAT_ATOMIC_ADD_RTN $vgpr5_vgpr6, $vgpr7, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -486,12 +486,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_nortn_ptr_load_flat4
-    ; GCN: FLAT_ATOMIC_ADD %vgpr0_vgpr1, %vgpr2, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr2 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_ATOMIC_ADD $vgpr0_vgpr1, $vgpr2, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr2 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; GCN-NEXT: S_ENDPGM
 
-    FLAT_ATOMIC_ADD %vgpr0_vgpr1, %vgpr2, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr2 = FLAT_LOAD_DWORD %vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_ATOMIC_ADD $vgpr0_vgpr1, $vgpr2, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr2 = FLAT_LOAD_DWORD $vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -501,13 +501,13 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_rtn_into_ptr_mubuf4
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: %vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN %vgpr2, %vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
+    ; GCN-NEXT: $vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN $vgpr2, $vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN %vgpr2, %vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr2 = BUFFER_ATOMIC_ADD_OFFEN_RTN $vgpr2, $vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -517,12 +517,12 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: break_clause_atomic_nortn_ptr_load_mubuf4
-    ; GCN: BUFFER_ATOMIC_ADD_OFFEN %vgpr0, %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: BUFFER_ATOMIC_ADD_OFFEN $vgpr0, $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
 
-    BUFFER_ATOMIC_ADD_OFFEN %vgpr0, %vgpr1, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, implicit %exec
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFEN %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    BUFFER_ATOMIC_ADD_OFFEN $vgpr0, $vgpr1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, implicit $exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFEN $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -533,11 +533,11 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: no_break_clause_mubuf_load_novaddr
-    ; GCN: %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    ; GCN-NEXT: %vgpr3 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    ; GCN: $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    ; GCN-NEXT: $vgpr3 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     ; GCN-NEXT: S_ENDPGM
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -547,16 +547,16 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: mix_load_store_clause
-    ; GCN: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    FLAT_STORE_DWORD %vgpr2_vgpr3, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_STORE_DWORD $vgpr2_vgpr3, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -566,15 +566,15 @@
 body: |
   bb.0:
     ; GCN-LABEL: name: mix_load_store_clause_same_address
-    ; GCN: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
     ; XNACK-NEXT: S_NOP 0
-    ; GCN-NEXT: FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    ; GCN-NEXT: %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    ; GCN-NEXT: FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    ; GCN-NEXT: $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr10 = FLAT_LOAD_DWORD %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    FLAT_STORE_DWORD %vgpr0_vgpr1, %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr11 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr10 = FLAT_LOAD_DWORD $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    FLAT_STORE_DWORD $vgpr0_vgpr1, $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr11 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
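
The break_clause_* tests above all pin down the same property: on XNACK
targets, back-to-back memory operations form a clause, and the clause must
be broken (the 'XNACK-NEXT: S_NOP 0' lines) when a later load overwrites
registers an earlier clause member still uses. A minimal sketch of that
interference rule, assuming plain sets of register names rather than the
pass's real MachineInstr interface:

    # A sketch only; the in-tree pass inspects MachineInstr operands.
    def needs_clause_break(first_uses, second_defs):
        """True when the second load's results overlap registers the
        first load reads -- the overwrite that forces an S_NOP above."""
        return bool(first_uses & second_defs)

    # break_clause_simple_load_flat16_ptr: the DWORDX4 load writes
    # vgpr2..vgpr5, overlapping the DWORDX2 load's address vgpr2_vgpr3.
    first_uses = {'vgpr2', 'vgpr3'}
    second_defs = {'vgpr2', 'vgpr3', 'vgpr4', 'vgpr5'}
    assert needs_clause_break(first_uses, second_defs)
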
diff --git a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
index 8ab99c6..cf6b105 100644
--- a/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
+++ b/llvm/test/CodeGen/AMDGPU/clamp-omod-special-case.mir
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fold-operands  %s -o - | FileCheck -check-prefix=GCN %s
 ---
 # GCN-LABEL: name: v_max_self_clamp_not_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $exec
 
 name:            v_max_self_clamp_not_set_f32
 tracksRegLiveness: true
@@ -35,37 +35,37 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: v_clamp_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN: %21:vgpr_32 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $exec
 name:            v_clamp_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -97,38 +97,38 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MAX_F32_e64 0, killed %20, 0, killed %20, 1, 3, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 ...
 ---
 # Don't fold a mul that looks like an omod if it already has omod set
 
 # GCN-LABEL: name: v_omod_mul_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $exec
 name:            v_omod_mul_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -160,30 +160,30 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 0, 3, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -191,8 +191,8 @@
 # Don't fold a mul that looks like an omod if it already has clamp set
 # This might be OK, but would require folding the clamp at the same time.
 # GCN-LABEL: name: v_omod_mul_clamp_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $exec
 
 name:            v_omod_mul_clamp_already_set_f32
 tracksRegLiveness: true
@@ -225,30 +225,30 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_MUL_F32_e64 0, killed %20, 0, 1056964608, 1, 0, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -269,8 +269,8 @@
 # Don't fold an add that looks like an omod if it already has omod set
 
 # GCN-LABEL: name: v_omod_add_omod_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $exec
 name:            v_omod_add_omod_already_set_f32
 tracksRegLiveness: true
 registers:
@@ -302,30 +302,30 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 0, 3, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -333,8 +333,8 @@
 # Don't fold an add that looks like an omod if it already has clamp set
 # This might be OK, but would require folding the clamp at the same time.
 # GCN-LABEL: name: v_omod_add_clamp_already_set_f32
-# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
+# GCN: %20:vgpr_32 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+# GCN-NEXT: %21:vgpr_32 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $exec
 
 name:            v_omod_add_clamp_already_set_f32
 tracksRegLiveness: true
@@ -367,30 +367,30 @@
   - { id: 25, class: vreg_64 }
   - { id: 26, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %24 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %24 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %25 = REG_SEQUENCE %3, 1, %24, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
     %14 = S_MOV_B32 2
-    %26 = V_LSHL_B64 killed %25, 2, implicit %exec
+    %26 = V_LSHL_B64 killed %25, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
     %18 = COPY %26
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit %exec
-    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit %exec
-    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %26, killed %13, 0, 0, 0, 0, 0, implicit $exec
+    %20 = V_ADD_F32_e64 0, killed %17, 0, 1065353216, 0, 0, implicit $exec
+    %21 = V_ADD_F32_e64 0, killed %20, 0, killed %20, 1, 0, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 killed %21, %26, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -404,9 +404,9 @@
   - { id: 1, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %0 = COPY %vgpr0
-    %1 = V_MAX_F32_e64 0, killed %0, 0, 1056964608, 1, 0, implicit %exec
+    %0 = COPY $vgpr0
+    %1 = V_MAX_F32_e64 0, killed %0, 0, 1056964608, 1, 0, implicit $exec
 
 ...
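
For readers outside AMDGPU: the large immediates in the clamp/omod checks
above are IEEE-754 bit patterns, and omod is the VOP3 output-modifier
field. An instruction "looks like an omod" when it multiplies by one of
the omod factors (x + x counts as *2.0); si-fold-operands may fold it into
the producing instruction unless, as in these tests, the clamp or omod
bits are already in use. A sketch, with the omod encoding stated as an
assumption:

    import struct

    def f32_bits(x: float) -> int:
        """Bit pattern of an IEEE-754 single, as MIR prints FP immediates."""
        return struct.unpack('<I', struct.pack('<f', x))[0]

    # The immediates in the check lines above:
    assert f32_bits(1.0) == 1065353216   # V_ADD_F32 ..., 1065353216: x + 1.0
    assert f32_bits(0.5) == 1056964608   # V_MUL_F32 ..., 1056964608: x * 0.5

    # Assumed VOP3 omod encoding: 0 = none, 1 = *2.0, 2 = *4.0, 3 = *0.5.
    OMOD_FACTOR = {0: 1.0, 1: 2.0, 2: 4.0, 3: 0.5}
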
diff --git a/llvm/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir b/llvm/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir
index 4c528b5..79dccbf 100644
--- a/llvm/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir
+++ b/llvm/test/CodeGen/AMDGPU/cluster-flat-loads-postra.mir
@@ -10,22 +10,22 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 body:             |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %vgpr0_vgpr1 = IMPLICIT_DEF
-    %vgpr4_vgpr5 = IMPLICIT_DEF
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr0_vgpr1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %vgpr4 = FLAT_LOAD_DWORD %vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %vgpr2 = IMPLICIT_DEF
-    %vgpr3 = IMPLICIT_DEF
-    %vgpr6 = IMPLICIT_DEF
-    %vgpr0 = V_ADD_I32_e32 16, %vgpr2, implicit-def %vcc, implicit %exec
-    %vgpr1 = V_ADDC_U32_e32 %vgpr3, killed %vgpr6, implicit-def dead %vcc, implicit %vcc, implicit %exec
-    FLAT_STORE_DWORD %vgpr2_vgpr3, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    FLAT_STORE_DWORD %vgpr0_vgpr1, killed %vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+    $vgpr0_vgpr1 = IMPLICIT_DEF
+    $vgpr4_vgpr5 = IMPLICIT_DEF
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr0_vgpr1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    $vgpr4 = FLAT_LOAD_DWORD $vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    $vgpr2 = IMPLICIT_DEF
+    $vgpr3 = IMPLICIT_DEF
+    $vgpr6 = IMPLICIT_DEF
+    $vgpr0 = V_ADD_I32_e32 16, $vgpr2, implicit-def $vcc, implicit $exec
+    $vgpr1 = V_ADDC_U32_e32 $vgpr3, killed $vgpr6, implicit-def dead $vcc, implicit $vcc, implicit $exec
+    FLAT_STORE_DWORD $vgpr2_vgpr3, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    FLAT_STORE_DWORD $vgpr0_vgpr1, killed $vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir b/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir
index 50caba2..2ef4cb4 100644
--- a/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir
+++ b/llvm/test/CodeGen/AMDGPU/cluster-flat-loads.mir
@@ -14,7 +14,7 @@
 body:             |
   bb.0:
     %0 = IMPLICIT_DEF
-    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec
-    %3 = FLAT_LOAD_DWORD %0, 4, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %3 = FLAT_LOAD_DWORD %0, 4, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 ...
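
Every hunk in this patch is the same mechanical rewrite: named physical
registers gain the '$' sigil while numbered vregs (%0, %20:vgpr_32), block
references (%bb.1) and %ir.* values keep '%'. A throwaway script along the
following lines could produce such hunks; this is an illustration, not
necessarily how the patch was actually generated:

    import re
    import sys

    # Physical registers named in these AMDGPU tests (a test-local list;
    # a real migration would enumerate the target's register definitions).
    PHYSREG = re.compile(
        r'%((?:[sv]gpr\d+(?:_[sv]gpr\d+)*|exec|flat_scr|vcc|scc|m0)\b)')

    def resigil(line: str) -> str:
        """Prefix named physical registers with '$'; leave numbered vregs,
        %bb.* and %ir.* references untouched."""
        return PHYSREG.sub(r'$\1', line)

    if __name__ == '__main__':
        for line in sys.stdin:
            sys.stdout.write(resigil(line))
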
diff --git a/llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir b/llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
index 234fe57..1a95d10 100644
--- a/llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
+++ b/llvm/test/CodeGen/AMDGPU/coalescer-subreg-join.mir
@@ -22,9 +22,9 @@
   - { id: 20, class: vreg_512 }
   - { id: 27, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr2_sgpr3', virtual-reg: '%0' }
-  - { reg: '%vgpr2', virtual-reg: '%1' }
-  - { reg: '%vgpr3', virtual-reg: '%2' }
+  - { reg: '$sgpr2_sgpr3', virtual-reg: '%0' }
+  - { reg: '$vgpr2', virtual-reg: '%1' }
+  - { reg: '$vgpr3', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -41,11 +41,11 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr2_sgpr3, %vgpr2, %vgpr3
+    liveins: $sgpr2_sgpr3, $vgpr2, $vgpr3
 
-    %0 = COPY %sgpr2_sgpr3
-    %1 = COPY %vgpr2
-    %2 = COPY %vgpr3
+    %0 = COPY $sgpr2_sgpr3
+    %1 = COPY $vgpr2
+    %2 = COPY $vgpr3
     %3 = S_LOAD_DWORDX8_IMM %0, 0, 0
     %4 = S_LOAD_DWORDX4_IMM %0, 12, 0
     %5 = S_LOAD_DWORDX8_IMM %0, 16, 0
@@ -61,7 +61,7 @@
     %11.sub6 = COPY %1
     %11.sub7 = COPY %1
     %11.sub8 = COPY %1
-    dead %18 = IMAGE_SAMPLE_C_D_O_V1_V16 %11, %3, %4, 1, 0, 0, 0, 0, 0, 0, -1, implicit %exec
+    dead %18 = IMAGE_SAMPLE_C_D_O_V1_V16 %11, %3, %4, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec
     %20.sub1 = COPY %2
     %20.sub2 = COPY %2
     %20.sub3 = COPY %2
@@ -70,6 +70,6 @@
     %20.sub6 = COPY %2
     %20.sub7 = COPY %2
     %20.sub8 = COPY %2
-    dead %27 = IMAGE_SAMPLE_C_D_O_V1_V16 %20, %5, %6, 1, 0, 0, 0, 0, 0, 0, -1, implicit %exec
+    dead %27 = IMAGE_SAMPLE_C_D_O_V1_V16 %20, %5, %6, 1, 0, 0, 0, 0, 0, 0, -1, implicit $exec
 
 ...
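
In constant-fold-imm-immreg.mir below, only the sigils change; the
immediates the checks expect are the folded constants themselves and can
be re-derived directly:

    # Folded constants expected by the checks in the next file:
    assert 1234567 & 9999 == 1543   # s_fold_and_imm_regimm_32
    assert 982 & 1234567 == 646     # v_fold_and_imm_regimm_32
    assert 1 << 12 == 4096          # s_fold_shl_imm_regimm_32
    assert 999123 >> 12 == 243      # s_fold_ashr_imm_regimm_32
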
diff --git a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
index d29c6af..e8e794f 100644
--- a/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
+++ b/llvm/test/CodeGen/AMDGPU/constant-fold-imm-immreg.mir
@@ -2,7 +2,7 @@
 ...
 
 # GCN-LABEL: name: s_fold_and_imm_regimm_32{{$}}
-# GCN: %10:vgpr_32 = V_MOV_B32_e32 1543, implicit %exec
+# GCN: %10:vgpr_32 = V_MOV_B32_e32 1543, implicit $exec
 # GCN: BUFFER_STORE_DWORD_OFFSET killed %10,
 name:            s_fold_and_imm_regimm_32
 alignment:       0
@@ -24,7 +24,7 @@
   - { id: 9, class: sreg_32_xm0 }
   - { id: 10, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -41,9 +41,9 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0 = COPY %sgpr0_sgpr1
+    %0 = COPY $sgpr0_sgpr1
     %1 = S_LOAD_DWORDX2_IMM %0, 36, 0
     %2 = COPY %1.sub1
     %3 = COPY %1.sub0
@@ -52,9 +52,9 @@
     %6 = REG_SEQUENCE killed %2, 1, killed %3, 2, killed %4, 3, killed %5, 4
     %7 = S_MOV_B32 1234567
     %8 = S_MOV_B32 9999
-    %9 = S_AND_B32 killed %7, killed %8, implicit-def dead %scc
+    %9 = S_AND_B32 killed %7, killed %8, implicit-def dead $scc
     %10 = COPY %9
-    BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %10, killed %6, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -62,19 +62,19 @@
 
 # GCN-LABEL: name: v_fold_and_imm_regimm_32{{$}}
 
-# GCN: %9:vgpr_32 = V_MOV_B32_e32 646, implicit %exec
+# GCN: %9:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %9,
 
-# GCN: %10:vgpr_32 = V_MOV_B32_e32 646, implicit %exec
+# GCN: %10:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %10
 
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 646, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 646, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %11,
 
-# GCN: %12:vgpr_32 = V_MOV_B32_e32 1234567, implicit %exec
+# GCN: %12:vgpr_32 = V_MOV_B32_e32 1234567, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %12,
 
-# GCN: %13:vgpr_32 = V_MOV_B32_e32 63, implicit %exec
+# GCN: %13:vgpr_32 = V_MOV_B32_e32 63, implicit $exec
 # GCN: FLAT_STORE_DWORD %19, %13,
 
 name:            v_fold_and_imm_regimm_32
@@ -108,8 +108,8 @@
   - { id: 44, class: vgpr_32 }
 
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -126,37 +126,37 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
-    %31 = V_ASHRREV_I32_e64 31, %3, implicit %exec
+    %31 = V_ASHRREV_I32_e64 31, %3, implicit $exec
     %32 = REG_SEQUENCE %3, 1, %31, 2
-    %33 = V_LSHLREV_B64 2, killed %32, implicit %exec
+    %33 = V_LSHLREV_B64 2, killed %32, implicit $exec
     %20 = COPY %4.sub1
-    %44 = V_ADD_I32_e32 %4.sub0, %33.sub0, implicit-def %vcc, implicit %exec
+    %44 = V_ADD_I32_e32 %4.sub0, %33.sub0, implicit-def $vcc, implicit $exec
     %36 = COPY killed %20
-    %35 = V_ADDC_U32_e32 %33.sub1, %36, implicit-def %vcc, implicit %vcc, implicit %exec
+    %35 = V_ADDC_U32_e32 %33.sub1, %36, implicit-def $vcc, implicit $vcc, implicit $exec
     %37 = REG_SEQUENCE %44, 1, killed %35, 2
-    %24 = V_MOV_B32_e32 982, implicit %exec
+    %24 = V_MOV_B32_e32 982, implicit $exec
     %26 = S_MOV_B32 1234567
-    %34 = V_MOV_B32_e32 63, implicit %exec
+    %34 = V_MOV_B32_e32 63, implicit $exec
 
-    %27 = V_AND_B32_e64 %26, %24, implicit %exec
-    FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %27 = V_AND_B32_e64 %26, %24, implicit $exec
+    FLAT_STORE_DWORD %37, %27, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %28 = V_AND_B32_e64 %24, %26, implicit %exec
-    FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %28 = V_AND_B32_e64 %24, %26, implicit $exec
+    FLAT_STORE_DWORD %37, %28, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %29 = V_AND_B32_e32 %26, %24, implicit %exec
-    FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %29 = V_AND_B32_e32 %26, %24, implicit $exec
+    FLAT_STORE_DWORD %37, %29, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %30 = V_AND_B32_e64 %26, %26, implicit %exec
-    FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %30 = V_AND_B32_e64 %26, %26, implicit $exec
+    FLAT_STORE_DWORD %37, %30, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %31 = V_AND_B32_e64 %34, %34, implicit %exec
-    FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %31 = V_AND_B32_e64 %34, %34, implicit $exec
+    FLAT_STORE_DWORD %37, %31, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 
@@ -164,7 +164,7 @@
 ---
 
 # GCN-LABEL: name: s_fold_shl_imm_regimm_32{{$}}
-# GC1: %13 = V_MOV_B32_e32 4096, implicit %exec
+# GCN: %13 = V_MOV_B32_e32 4096, implicit $exec
 # GCN: BUFFER_STORE_DWORD_OFFSET killed %13,
 
 name:            s_fold_shl_imm_regimm_32
@@ -190,7 +190,7 @@
   - { id: 12, class: sreg_32_xm0 }
   - { id: 13, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -207,9 +207,9 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0 = COPY %sgpr0_sgpr1
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
     %5 = S_MOV_B32 1
     %6 = COPY %4.sub1
@@ -217,43 +217,43 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
-    %12 = S_LSHL_B32 killed %5, 12, implicit-def dead %scc
+    %12 = S_LSHL_B32 killed %5, 12, implicit-def dead $scc
     %13 = COPY %12
-    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: v_fold_shl_imm_regimm_32{{$}}
 
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 40955904, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 40955904, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %11,
 
-# GCN: %12:vgpr_32 = V_MOV_B32_e32 24, implicit %exec
+# GCN: %12:vgpr_32 = V_MOV_B32_e32 24, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %12,
 
-# GCN: %13:vgpr_32 = V_MOV_B32_e32 4096, implicit %exec
+# GCN: %13:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %13,
 
-# GCN: %14:vgpr_32 = V_MOV_B32_e32 24, implicit %exec
+# GCN: %14:vgpr_32 = V_MOV_B32_e32 24, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %14,
 
-# GCN: %15:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# GCN: %15:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %15,
 
-# GCN: %22:vgpr_32 = V_MOV_B32_e32 4096, implicit %exec
+# GCN: %22:vgpr_32 = V_MOV_B32_e32 4096, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %22,
 
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %23,
 
-# GCN: %25:vgpr_32 = V_MOV_B32_e32 2, implicit %exec
+# GCN: %25:vgpr_32 = V_MOV_B32_e32 2, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %25,
 
-# GCN: %26:vgpr_32 = V_MOV_B32_e32 7927808, implicit %exec
+# GCN: %26:vgpr_32 = V_MOV_B32_e32 7927808, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %26,
 
-# GCN: %28:vgpr_32 = V_MOV_B32_e32 -8, implicit %exec
+# GCN: %28:vgpr_32 = V_MOV_B32_e32 -8, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %28,
 
 name:            v_fold_shl_imm_regimm_32
@@ -294,8 +294,8 @@
   - { id: 27, class: sreg_32_xm0 }
   - { id: 28, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -312,54 +312,54 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %2 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %2 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %3 = S_LOAD_DWORDX2_IMM %0, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
+    %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec
     %16 = REG_SEQUENCE %2, 1, %15, 2
-    %17 = V_LSHLREV_B64 2, killed %16, implicit %exec
+    %17 = V_LSHLREV_B64 2, killed %16, implicit $exec
     %9 = COPY %3.sub1
-    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def %vcc, implicit %exec
+    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec
     %19 = COPY killed %9
-    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec
+    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec
     %20 = REG_SEQUENCE %21, 1, killed %18, 2
-    %10 = V_MOV_B32_e32 9999, implicit %exec
-    %24 = V_MOV_B32_e32 3871, implicit %exec
-    %6 = V_MOV_B32_e32 1, implicit %exec
+    %10 = V_MOV_B32_e32 9999, implicit $exec
+    %24 = V_MOV_B32_e32 3871, implicit $exec
+    %6 = V_MOV_B32_e32 1, implicit $exec
     %7 = S_MOV_B32 1
     %27 = S_MOV_B32 -4
 
-    %11 = V_LSHLREV_B32_e64 12, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %11 = V_LSHLREV_B32_e64 12, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %12 = V_LSHLREV_B32_e64 %7, 12, implicit %exec
-    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %12 = V_LSHLREV_B32_e64 %7, 12, implicit $exec
+    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %13 = V_LSHL_B32_e64 %7, 12, implicit %exec
-    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %13 = V_LSHL_B32_e64 %7, 12, implicit $exec
+    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %14 = V_LSHL_B32_e64 12, %7, implicit %exec
-    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %14 = V_LSHL_B32_e64 12, %7, implicit $exec
+    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %15 = V_LSHL_B32_e64 12, %24, implicit %exec
-    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %15 = V_LSHL_B32_e64 12, %24, implicit $exec
+    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %22 = V_LSHL_B32_e64 %6, 12, implicit %exec
-    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %22 = V_LSHL_B32_e64 %6, 12, implicit $exec
+    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %23 = V_LSHL_B32_e64 %6, 32, implicit %exec
-    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %23 = V_LSHL_B32_e64 %6, 32, implicit $exec
+    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %25 = V_LSHL_B32_e32 %6, %6, implicit %exec
-    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %25 = V_LSHL_B32_e32 %6, %6, implicit $exec
+    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %26 = V_LSHLREV_B32_e32 11, %24, implicit %exec
-    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %26 = V_LSHLREV_B32_e32 11, %24, implicit $exec
+    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %28 = V_LSHL_B32_e32 %27, %6, implicit %exec
-    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %28 = V_LSHL_B32_e32 %27, %6, implicit $exec
+    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 
@@ -367,7 +367,7 @@
 ---
 
 # GCN-LABEL: name: s_fold_ashr_imm_regimm_32{{$}}
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 243, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 243, implicit $exec
 # GCN: BUFFER_STORE_DWORD_OFFSET killed %11, killed %8,
 name:            s_fold_ashr_imm_regimm_32
 alignment:       0
@@ -390,7 +390,7 @@
   - { id: 12, class: sreg_32_xm0 }
   - { id: 13, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -407,9 +407,9 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0 = COPY %sgpr0_sgpr1
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
     %5 = S_MOV_B32 999123
     %6 = COPY %4.sub1
@@ -417,42 +417,42 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
-    %12 = S_ASHR_I32 killed %5, 12, implicit-def dead %scc
+    %12 = S_ASHR_I32 killed %5, 12, implicit-def dead $scc
     %13 = COPY %12
-    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 
 # GCN-LABEL: name: v_fold_ashr_imm_regimm_32{{$}}
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %11,
 
-# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit %exec
+# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %12,
 
-# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit %exec
+# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %13,
 
-# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit %exec
+# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %14,
 
-# GCN: %15:vgpr_32 = V_MOV_B32_e32 -1, implicit %exec
+# GCN: %15:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %15,
 
-# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit %exec
+# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %22,
 
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %23,
 
-# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit %exec
+# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %25,
 
-# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit %exec
+# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %26,
 
-# GCN: %28:vgpr_32 = V_MOV_B32_e32 -1, implicit %exec
+# GCN: %28:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %28,
 
 name:            v_fold_ashr_imm_regimm_32
@@ -497,8 +497,8 @@
   - { id: 34, class: vgpr_32 }
   - { id: 35, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -515,59 +515,59 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %2 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %2 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %3 = S_LOAD_DWORDX2_IMM %0, 36, 0
-    %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
+    %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec
     %16 = REG_SEQUENCE %2, 1, %15, 2
-    %17 = V_LSHLREV_B64 2, killed %16, implicit %exec
+    %17 = V_LSHLREV_B64 2, killed %16, implicit $exec
     %9 = COPY %3.sub1
-    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def %vcc, implicit %exec
+    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec
     %19 = COPY killed %9
-    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec
+    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec
     %20 = REG_SEQUENCE %21, 1, killed %18, 2
-    %10 = V_MOV_B32_e32 999234234, implicit %exec
-    %24 = V_MOV_B32_e32 3871, implicit %exec
-    %6 = V_MOV_B32_e32 1000000, implicit %exec
+    %10 = V_MOV_B32_e32 999234234, implicit $exec
+    %24 = V_MOV_B32_e32 3871, implicit $exec
+    %6 = V_MOV_B32_e32 1000000, implicit $exec
     %7 = S_MOV_B32 13424252
     %8 = S_MOV_B32 4
     %27 = S_MOV_B32 -4
     %32 = S_MOV_B32 1
     %33 = S_MOV_B32 3841
-    %34 = V_MOV_B32_e32 3841, implicit %exec
-    %35 = V_MOV_B32_e32 2, implicit %exec
+    %34 = V_MOV_B32_e32 3841, implicit $exec
+    %35 = V_MOV_B32_e32 2, implicit $exec
 
-    %11 = V_ASHRREV_I32_e64 8, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %11 = V_ASHRREV_I32_e64 8, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %12 = V_ASHRREV_I32_e64 %8, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %12 = V_ASHRREV_I32_e64 %8, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %13 = V_ASHR_I32_e64 %7, 3, implicit %exec
-    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %13 = V_ASHR_I32_e64 %7, 3, implicit $exec
+    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %14 = V_ASHR_I32_e64 7, %32, implicit %exec
-    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %14 = V_ASHR_I32_e64 7, %32, implicit $exec
+    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %15 = V_ASHR_I32_e64 %27, %24, implicit %exec
-    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %15 = V_ASHR_I32_e64 %27, %24, implicit $exec
+    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %22 = V_ASHR_I32_e64 %6, 4, implicit %exec
-    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %22 = V_ASHR_I32_e64 %6, 4, implicit $exec
+    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %23 = V_ASHR_I32_e64 %6, %33, implicit %exec
-    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %23 = V_ASHR_I32_e64 %6, %33, implicit $exec
+    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %25 = V_ASHR_I32_e32 %34, %34, implicit %exec
-    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %25 = V_ASHR_I32_e32 %34, %34, implicit $exec
+    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %26 = V_ASHRREV_I32_e32 11, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %26 = V_ASHRREV_I32_e32 11, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %28 = V_ASHR_I32_e32 %27, %35, implicit %exec
-    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %28 = V_ASHR_I32_e32 %27, %35, implicit $exec
+    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 
@@ -575,7 +575,7 @@
 ---
 
 # GCN-LABEL: name: s_fold_lshr_imm_regimm_32{{$}}
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 1048332, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 1048332, implicit $exec
 # GCN: BUFFER_STORE_DWORD_OFFSET killed %11, killed %8,
 name:            s_fold_lshr_imm_regimm_32
 alignment:       0
@@ -598,7 +598,7 @@
   - { id: 12, class: sreg_32_xm0 }
   - { id: 13, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -615,9 +615,9 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %0 = COPY %sgpr0_sgpr1
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 36, 0
     %5 = S_MOV_B32 -999123
     %6 = COPY %4.sub1
@@ -625,43 +625,43 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %6, 2, killed %9, 3, killed %8, 4
-    %12 = S_LSHR_B32 killed %5, 12, implicit-def dead %scc
+    %12 = S_LSHR_B32 killed %5, 12, implicit-def dead $scc
     %13 = COPY %12
-    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %13, killed %10, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 
 # GCN-LABEL: name: v_fold_lshr_imm_regimm_32{{$}}
-# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit %exec
+# GCN: %11:vgpr_32 = V_MOV_B32_e32 3903258, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %11,
 
-# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit %exec
+# GCN: %12:vgpr_32 = V_MOV_B32_e32 62452139, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %12,
 
-# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit %exec
+# GCN: %13:vgpr_32 = V_MOV_B32_e32 1678031, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %13,
 
-# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit %exec
+# GCN: %14:vgpr_32 = V_MOV_B32_e32 3, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %14,
 
-# GCN: %15:vgpr_32 = V_MOV_B32_e32 1, implicit %exec
+# GCN: %15:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %15,
 
-# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit %exec
+# GCN: %22:vgpr_32 = V_MOV_B32_e32 62500, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %22,
 
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 500000, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %23,
 
-# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit %exec
+# GCN: %25:vgpr_32 = V_MOV_B32_e32 1920, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %25,
 
-# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit %exec
+# GCN: %26:vgpr_32 = V_MOV_B32_e32 487907, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %26,
 
-# GCN: %28:vgpr_32 = V_MOV_B32_e32 1073741823, implicit %exec
+# GCN: %28:vgpr_32 = V_MOV_B32_e32 1073741823, implicit $exec
 # GCN: FLAT_STORE_DWORD %20, %28,
 
 name:            v_fold_lshr_imm_regimm_32
@@ -706,8 +706,8 @@
   - { id: 34, class: vgpr_32 }
   - { id: 35, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -724,59 +724,59 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %2 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %2 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %3 = S_LOAD_DWORDX2_IMM %0, 36, 0
-    %15 = V_ASHRREV_I32_e64 31, %2, implicit %exec
+    %15 = V_ASHRREV_I32_e64 31, %2, implicit $exec
     %16 = REG_SEQUENCE %2, 1, %15, 2
-    %17 = V_LSHLREV_B64 2, killed %16, implicit %exec
+    %17 = V_LSHLREV_B64 2, killed %16, implicit $exec
     %9 = COPY %3.sub1
-    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def %vcc, implicit %exec
+    %21 = V_ADD_I32_e32 %3.sub0, %17.sub0, implicit-def $vcc, implicit $exec
     %19 = COPY killed %9
-    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def %vcc, implicit %vcc, implicit %exec
+    %18 = V_ADDC_U32_e32 %17.sub1, %19, implicit-def $vcc, implicit $vcc, implicit $exec
     %20 = REG_SEQUENCE %21, 1, killed %18, 2
-    %10 = V_MOV_B32_e32 999234234, implicit %exec
-    %24 = V_MOV_B32_e32 3871, implicit %exec
-    %6 = V_MOV_B32_e32 1000000, implicit %exec
+    %10 = V_MOV_B32_e32 999234234, implicit $exec
+    %24 = V_MOV_B32_e32 3871, implicit $exec
+    %6 = V_MOV_B32_e32 1000000, implicit $exec
     %7 = S_MOV_B32 13424252
     %8 = S_MOV_B32 4
     %27 = S_MOV_B32 -4
     %32 = S_MOV_B32 1
     %33 = S_MOV_B32 3841
-    %34 = V_MOV_B32_e32 3841, implicit %exec
-    %35 = V_MOV_B32_e32 2, implicit %exec
+    %34 = V_MOV_B32_e32 3841, implicit $exec
+    %35 = V_MOV_B32_e32 2, implicit $exec
 
-    %11 = V_LSHRREV_B32_e64 8, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %11 = V_LSHRREV_B32_e64 8, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %11, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %12 = V_LSHRREV_B32_e64 %8, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %12 = V_LSHRREV_B32_e64 %8, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %12, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %13 = V_LSHR_B32_e64 %7, 3, implicit %exec
-    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %13 = V_LSHR_B32_e64 %7, 3, implicit $exec
+    FLAT_STORE_DWORD %20, %13, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %14 = V_LSHR_B32_e64 7, %32, implicit %exec
-    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %14 = V_LSHR_B32_e64 7, %32, implicit $exec
+    FLAT_STORE_DWORD %20, %14, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %15 = V_LSHR_B32_e64 %27, %24, implicit %exec
-    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %15 = V_LSHR_B32_e64 %27, %24, implicit $exec
+    FLAT_STORE_DWORD %20, %15, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %22 = V_LSHR_B32_e64 %6, 4, implicit %exec
-    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %22 = V_LSHR_B32_e64 %6, 4, implicit $exec
+    FLAT_STORE_DWORD %20, %22, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %23 = V_LSHR_B32_e64 %6, %33, implicit %exec
-    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %23 = V_LSHR_B32_e64 %6, %33, implicit $exec
+    FLAT_STORE_DWORD %20, %23, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %25 = V_LSHR_B32_e32 %34, %34, implicit %exec
-    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %25 = V_LSHR_B32_e32 %34, %34, implicit $exec
+    FLAT_STORE_DWORD %20, %25, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %26 = V_LSHRREV_B32_e32 11, %10, implicit %exec
-    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %26 = V_LSHRREV_B32_e32 11, %10, implicit $exec
+    FLAT_STORE_DWORD %20, %26, 0, 0, 0, implicit $exec, implicit $flat_scr
 
-    %28 = V_LSHR_B32_e32 %27, %35, implicit %exec
-    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %28 = V_LSHR_B32_e32 %27, %35, implicit $exec
+    FLAT_STORE_DWORD %20, %28, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     S_ENDPGM
 
@@ -798,9 +798,9 @@
   - { id: 3, class: vreg_64, preferred-register: '' }
 body:             |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %2 = V_XOR_B32_e64 killed %0, undef %1, implicit %exec
-    FLAT_STORE_DWORD undef %3, %2, 0, 0, 0, implicit %exec, implicit %flat_scr
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %2 = V_XOR_B32_e64 killed %0, undef %1, implicit $exec
+    FLAT_STORE_DWORD undef %3, %2, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/dead_copy.mir b/llvm/test/CodeGen/AMDGPU/dead_copy.mir
index d581b29..ccdcba4 100644
--- a/llvm/test/CodeGen/AMDGPU/dead_copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/dead_copy.mir
@@ -2,8 +2,8 @@
 
 # GCN-LABEL: dead_copy
 # GCN:       bb.0
-# GCN-NOT:   dead %vgpr5 = COPY undef %vgpr11, implicit %exec
-# GCN:       %vgpr5 = COPY %vgpr11, implicit %exec
+# GCN-NOT:   dead $vgpr5 = COPY undef $vgpr11, implicit $exec
+# GCN:       $vgpr5 = COPY $vgpr11, implicit $exec
 
 ---
 name: dead_copy
@@ -11,17 +11,17 @@
 body:    |
 
   bb.0:
-    liveins: %vgpr11, %sgpr0, %sgpr1, %vgpr6, %vgpr7, %vgpr4
+    liveins: $vgpr11, $sgpr0, $sgpr1, $vgpr6, $vgpr7, $vgpr4
 
-    dead %vgpr5 = COPY undef %vgpr11, implicit %exec
+    dead $vgpr5 = COPY undef $vgpr11, implicit $exec
 
-    %vgpr5 = COPY %vgpr11, implicit %exec
+    $vgpr5 = COPY $vgpr11, implicit $exec
 
-    %sgpr14 = S_ADD_U32 %sgpr0, target-flags(amdgpu-gotprel) 1136, implicit-def %scc
-    %sgpr15 = S_ADDC_U32 %sgpr1, target-flags(amdgpu-gotprel32-lo) 0, implicit-def dead %scc, implicit %scc
+    $sgpr14 = S_ADD_U32 $sgpr0, target-flags(amdgpu-gotprel) 1136, implicit-def $scc
+    $sgpr15 = S_ADDC_U32 $sgpr1, target-flags(amdgpu-gotprel32-lo) 0, implicit-def dead $scc, implicit $scc
 
-    %vgpr10 = COPY killed %sgpr14, implicit %exec
-    %vgpr11 = COPY killed %sgpr15, implicit %exec
+    $vgpr10 = COPY killed $sgpr14, implicit $exec
+    $vgpr11 = COPY killed $sgpr15, implicit $exec
 
-    FLAT_STORE_DWORDX4 %vgpr10_vgpr11, %vgpr4_vgpr5_vgpr6_vgpr7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX4 $vgpr10_vgpr11, $vgpr4_vgpr5_vgpr6_vgpr7, 0, 0, 0, implicit $exec, implicit $flat_scr
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/debug-value2.ll b/llvm/test/CodeGen/AMDGPU/debug-value2.ll
index ce36d6c..8ecf897 100644
--- a/llvm/test/CodeGen/AMDGPU/debug-value2.ll
+++ b/llvm/test/CodeGen/AMDGPU/debug-value2.ll
@@ -10,9 +10,9 @@
 
 define <4 x float> @Scene_transformT(i32 %subshapeIdx, <4 x float> %v, float %time, i8 addrspace(1)* %gScene, i32 addrspace(1)* %gSceneOffsets) local_unnamed_addr !dbg !110 {
 entry:
-; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] %vgpr6_vgpr7
+; CHECK: ;DEBUG_VALUE: Scene_transformT:gScene <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr6_vgpr7
   call void @llvm.dbg.value(metadata i8 addrspace(1)* %gScene, metadata !120, metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !154
-; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] %vgpr8_vgpr9
+; CHECK: ;DEBUG_VALUE: Scene_transformT:gSceneOffsets <- [DW_OP_constu 1, DW_OP_swap, DW_OP_xderef] $vgpr8_vgpr9
   call void @llvm.dbg.value(metadata i32 addrspace(1)* %gSceneOffsets, metadata !121, metadata !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef)), !dbg !155
   %call = tail call %struct.ShapeData addrspace(1)* @Scene_getSubShapeData(i32 %subshapeIdx, i8 addrspace(1)* %gScene, i32 addrspace(1)* %gSceneOffsets)
   %m_linearMotion = getelementptr inbounds %struct.ShapeData, %struct.ShapeData addrspace(1)* %call, i64 0, i32 2
diff --git a/llvm/test/CodeGen/AMDGPU/detect-dead-lanes.mir b/llvm/test/CodeGen/AMDGPU/detect-dead-lanes.mir
index 12460d2..b035977 100644
--- a/llvm/test/CodeGen/AMDGPU/detect-dead-lanes.mir
+++ b/llvm/test/CodeGen/AMDGPU/detect-dead-lanes.mir
@@ -42,9 +42,9 @@
 # Check defined lanes transfer; Includes checking for some special cases like
 # undef operands or IMPLICIT_DEF definitions.
 # CHECK-LABEL: name: test1
-# CHECK: %0:sreg_128 = REG_SEQUENCE %sgpr0, %subreg.sub0, %sgpr0, %subreg.sub2
-# CHECK: %1:sreg_128 = INSERT_SUBREG %0, %sgpr1,  %subreg.sub3
-# CHECK: %2:sreg_64 = INSERT_SUBREG %0.sub2_sub3, %sgpr42,  %subreg.sub0
+# CHECK: %0:sreg_128 = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr0, %subreg.sub2
+# CHECK: %1:sreg_128 = INSERT_SUBREG %0, $sgpr1,  %subreg.sub3
+# CHECK: %2:sreg_64 = INSERT_SUBREG %0.sub2_sub3, $sgpr42,  %subreg.sub0
 # CHECK: S_NOP 0, implicit %1.sub0
 # CHECK: S_NOP 0, implicit undef %1.sub1
 # CHECK: S_NOP 0, implicit %1.sub2
@@ -87,9 +87,9 @@
   - { id: 10, class: sreg_128 }
 body: |
   bb.0:
-    %0 = REG_SEQUENCE %sgpr0, %subreg.sub0, %sgpr0, %subreg.sub2
-    %1 = INSERT_SUBREG %0, %sgpr1, %subreg.sub3
-    %2 = INSERT_SUBREG %0.sub2_sub3, %sgpr42, %subreg.sub0
+    %0 = REG_SEQUENCE $sgpr0, %subreg.sub0, $sgpr0, %subreg.sub2
+    %1 = INSERT_SUBREG %0, $sgpr1, %subreg.sub3
+    %2 = INSERT_SUBREG %0.sub2_sub3, $sgpr42, %subreg.sub0
     S_NOP 0, implicit %1.sub0
     S_NOP 0, implicit %1.sub1
     S_NOP 0, implicit %1.sub2
@@ -204,8 +204,8 @@
 # lanes. So we should not get a dead/undef flag here.
 # CHECK-LABEL: name: test3
 # CHECK: S_NOP 0, implicit-def %0
-# CHECK: %vcc = COPY %0
-# CHECK: %1:sreg_64 = COPY %vcc
+# CHECK: $vcc = COPY %0
+# CHECK: %1:sreg_64 = COPY $vcc
 # CHECK: S_NOP 0, implicit %1
 name: test3
 tracksRegLiveness: true
@@ -215,9 +215,9 @@
 body: |
   bb.0:
     S_NOP 0, implicit-def %0
-    %vcc = COPY %0
+    $vcc = COPY %0
 
-    %1 = COPY %vcc
+    %1 = COPY $vcc
     S_NOP 0, implicit %1
 ...
 ---
@@ -296,7 +296,7 @@
     ; let's swizzle some lanes around for fun...
     %5 = REG_SEQUENCE %4.sub0, %subreg.sub0, %4.sub2, %subreg.sub1, %4.sub1, %subreg.sub2, %4.sub3, %subreg.sub3
 
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.2:
@@ -349,7 +349,7 @@
     ; rotate lanes, but skip sub2 lane...
     %6 = REG_SEQUENCE %5.sub1, %subreg.sub0, %5.sub3, %subreg.sub1, %5.sub2, %subreg.sub2, %5.sub0, %subreg.sub3
 
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.2:
@@ -392,7 +392,7 @@
     ; rotate subreg lanes, skipping sub1
     %3 = REG_SEQUENCE %2.sub3, %subreg.sub0, %2.sub1, %subreg.sub1, %2.sub0, %subreg.sub2, %2.sub2, %subreg.sub3
 
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.2:
diff --git a/llvm/test/CodeGen/AMDGPU/endpgm-dce.mir b/llvm/test/CodeGen/AMDGPU/endpgm-dce.mir
index 9833cc1..7b81060 100644
--- a/llvm/test/CodeGen/AMDGPU/endpgm-dce.mir
+++ b/llvm/test/CodeGen/AMDGPU/endpgm-dce.mir
@@ -13,19 +13,19 @@
   - { id: 4, class: sgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %0 = IMPLICIT_DEF
     %3 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec
-    %4 = S_ADD_U32 %3, 1, implicit-def %scc
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: load_without_memoperand
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr
 # GCN-NEXT: S_ENDPGM
 name: load_without_memoperand
 tracksRegLiveness: true
@@ -37,19 +37,19 @@
   - { id: 4, class: sgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %0 = IMPLICIT_DEF
     %3 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec
-    %4 = S_ADD_U32 %3, 1, implicit-def %scc
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: load_volatile
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile load 4)
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: dead %1:vgpr_32 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load 4)
 # GCN-NEXT: S_ENDPGM
 name: load_volatile
 tracksRegLiveness: true
@@ -61,19 +61,19 @@
   - { id: 4, class: sgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %0 = IMPLICIT_DEF
     %3 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile load 4)
-    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit %exec
-    %4 = S_ADD_U32 %3, 1, implicit-def %scc
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    %1 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile load 4)
+    %2 = V_ADD_F32_e64 0, killed %1, 0, 1, 0, 0, implicit $exec
+    %4 = S_ADD_U32 %3, 1, implicit-def $scc
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: store
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
 # GCN-NEXT: S_ENDPGM
 name: store
 tracksRegLiveness: true
@@ -82,45 +82,45 @@
   - { id: 1, class: vgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    FLAT_STORE_DWORD %0, %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: barrier
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
 # GCN-NEXT: S_BARRIER
 # GCN-NEXT: S_ENDPGM
 name: barrier
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     S_BARRIER
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: call
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: %sgpr4_sgpr5 = S_SWAPPC_B64 %sgpr2_sgpr3
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: $sgpr4_sgpr5 = S_SWAPPC_B64 $sgpr2_sgpr3
 # GCN-NEXT: S_ENDPGM
 name: call
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %sgpr4_sgpr5 = S_SWAPPC_B64 %sgpr2_sgpr3
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    $sgpr4_sgpr5 = S_SWAPPC_B64 $sgpr2_sgpr3
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: exp
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: EXP 32, undef %0:vgpr_32, undef %1:vgpr_32, %2, undef %3:vgpr_32, 0, 0, 15, implicit %exec
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: EXP 32, undef %0:vgpr_32, undef %1:vgpr_32, %2, undef %3:vgpr_32, 0, 0, 15, implicit $exec
 # GCN-NEXT: S_ENDPGM
 name: exp
 tracksRegLiveness: true
@@ -131,24 +131,24 @@
   - { id: 3, class: vgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
+    $vcc = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    EXP 32, undef %0, undef %1, killed %2, undef %3, 0, 0, 15, implicit %exec
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    EXP 32, undef %0, undef %1, killed %2, undef %3, 0, 0, 15, implicit $exec
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: return_to_epilog
-# GCN:      %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-# GCN-NEXT: SI_RETURN_TO_EPILOG killed %vgpr0
+# GCN:      $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+# GCN-NEXT: SI_RETURN_TO_EPILOG killed $vgpr0
 name: return_to_epilog
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %vgpr0 = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    SI_RETURN_TO_EPILOG killed %vgpr0
+    $vcc = IMPLICIT_DEF
+    $vgpr0 = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    SI_RETURN_TO_EPILOG killed $vgpr0
 ...
 ---
 # GCN-LABEL: name: split_block
@@ -166,14 +166,14 @@
   - { id: 3, class: sgpr_32 }
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
 
   bb.1:
     %0 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %1 = V_ADD_F32_e64 0, killed %0, 0, 1, 0, 0, implicit %exec
-    %3 = S_ADD_U32 %2, 1, implicit-def %scc
+    %1 = V_ADD_F32_e64 0, killed %0, 0, 1, 0, 0, implicit $exec
+    %3 = S_ADD_U32 %2, 1, implicit-def $scc
     S_ENDPGM
 ...
 ---
@@ -188,8 +188,8 @@
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
 
   bb.1:
 
@@ -208,8 +208,8 @@
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     S_BRANCH %bb.1
 
   bb.1:
@@ -219,8 +219,8 @@
 # GCN-LABEL: name: split_block_cond_branch
 # GCN:      bb.0:
 # GCN-NEXT:   successors: %bb.2(0x40000000), %bb.1(0x40000000)
-# GCN:        %sgpr0_sgpr1 = S_OR_B64 %exec, %vcc, implicit-def %scc
-# GCN:        S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+# GCN:        $sgpr0_sgpr1 = S_OR_B64 $exec, $vcc, implicit-def $scc
+# GCN:        S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
 # GCN:      bb.1:
 # GCN:      bb.2:
 # GCN-NEXT:   S_ENDPGM
@@ -228,9 +228,9 @@
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, %vcc, implicit-def %scc
-    S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, $vcc, implicit-def $scc
+    S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
 
   bb.1:
 
@@ -253,13 +253,13 @@
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     S_BRANCH %bb.2
 
   bb.1:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
     S_BRANCH %bb.2
 
   bb.2:
@@ -269,7 +269,7 @@
 # GCN-LABEL: name: two_preds_one_dead
 # GCN:      bb.0:
 # GCN-NEXT:   successors: %bb.2
-# GCN:        %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+# GCN:        $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
 # GCN-NEXT:   S_BARRIER
 # GCN-NEXT:   S_BRANCH %bb.2
 # GCN:      bb.1:
@@ -282,14 +282,14 @@
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
     S_BARRIER
     S_BRANCH %bb.2
 
   bb.1:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
     S_BRANCH %bb.2
 
   bb.2:
diff --git a/llvm/test/CodeGen/AMDGPU/fix-vgpr-copies.mir b/llvm/test/CodeGen/AMDGPU/fix-vgpr-copies.mir
index 4951e0d..c53ff28 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-vgpr-copies.mir
+++ b/llvm/test/CodeGen/AMDGPU/fix-vgpr-copies.mir
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -start-after=greedy -stop-after=si-optimize-exec-masking -o - %s | FileCheck %s
 # Check that we first do all vector instructions and only then change exec
-# CHECK-DAG:  COPY %vgpr10_vgpr11
-# CHECK-DAG:  COPY %vgpr12_vgpr13
-# CHECK:      %exec = COPY
+# CHECK-DAG:  COPY $vgpr10_vgpr11
+# CHECK-DAG:  COPY $vgpr12_vgpr13
+# CHECK:      $exec = COPY
 
 ---
 name:            main
@@ -13,9 +13,9 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr4_sgpr5' }
-  - { reg: '%sgpr6' }
-  - { reg: '%vgpr0' }
+  - { reg: '$sgpr4_sgpr5' }
+  - { reg: '$sgpr6' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -32,13 +32,13 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.entry:
-    liveins: %vgpr3, %vgpr10_vgpr11, %vgpr12_vgpr13
+    liveins: $vgpr3, $vgpr10_vgpr11, $vgpr12_vgpr13
 
-    %vcc = V_CMP_NE_U32_e64 0, killed %vgpr3, implicit %exec
-    %sgpr4_sgpr5 = COPY %exec, implicit-def %exec
-    %sgpr6_sgpr7 = S_AND_B64 %sgpr4_sgpr5, killed %vcc, implicit-def dead %scc
-    %sgpr4_sgpr5 = S_XOR_B64 %sgpr6_sgpr7, killed %sgpr4_sgpr5, implicit-def dead %scc
-    %vgpr61_vgpr62 = COPY %vgpr10_vgpr11
-    %vgpr155_vgpr156 = COPY %vgpr12_vgpr13
-    %exec = S_MOV_B64_term killed %sgpr6_sgpr7
+    $vcc = V_CMP_NE_U32_e64 0, killed $vgpr3, implicit $exec
+    $sgpr4_sgpr5 = COPY $exec, implicit-def $exec
+    $sgpr6_sgpr7 = S_AND_B64 $sgpr4_sgpr5, killed $vcc, implicit-def dead $scc
+    $sgpr4_sgpr5 = S_XOR_B64 $sgpr6_sgpr7, killed $sgpr4_sgpr5, implicit-def dead $scc
+    $vgpr61_vgpr62 = COPY $vgpr10_vgpr11
+    $vgpr155_vgpr156 = COPY $vgpr12_vgpr13
+    $exec = S_MOV_B64_term killed $sgpr6_sgpr7
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/fix-wwm-liveness.mir b/llvm/test/CodeGen/AMDGPU/fix-wwm-liveness.mir
index 101ba00..0acf154 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-wwm-liveness.mir
+++ b/llvm/test/CodeGen/AMDGPU/fix-wwm-liveness.mir
@@ -1,5 +1,5 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-fix-wwm-liveness -o -  %s | FileCheck %s
-#CHECK: %exec = EXIT_WWM killed %19, implicit %21
+#CHECK: $exec = EXIT_WWM killed %19, implicit %21
 
 ---
 name:            test_wwm_liveness
@@ -18,7 +18,7 @@
   - { id: 5, class: vgpr_32, preferred-register: '' }
   - { id: 6, class: vgpr_32, preferred-register: '' }
   - { id: 7, class: vgpr_32, preferred-register: '' }
-  - { id: 8, class: sreg_64, preferred-register: '%vcc' }
+  - { id: 8, class: sreg_64, preferred-register: '$vcc' }
   - { id: 9, class: sreg_64, preferred-register: '' }
   - { id: 10, class: sreg_32_xm0, preferred-register: '' }
   - { id: 11, class: sreg_64, preferred-register: '' }
@@ -39,15 +39,15 @@
   bb.0:
     successors: %bb.1(0x40000000), %bb.2(0x40000000)
   
-    %21 = V_MOV_B32_e32 0, implicit %exec
-    %5 = V_MBCNT_LO_U32_B32_e64 -1, 0, implicit %exec
-    %6 = V_MBCNT_HI_U32_B32_e32 -1, killed %5, implicit %exec
-    %8 = V_CMP_GT_U32_e64 32, killed %6, implicit %exec
-    %22 = COPY %exec, implicit-def %exec
-    %23 = S_AND_B64 %22, %8, implicit-def dead %scc
-    %0 = S_XOR_B64 %23, %22, implicit-def dead %scc
-    %exec = S_MOV_B64_term killed %23
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    %21 = V_MOV_B32_e32 0, implicit $exec
+    %5 = V_MBCNT_LO_U32_B32_e64 -1, 0, implicit $exec
+    %6 = V_MBCNT_HI_U32_B32_e32 -1, killed %5, implicit $exec
+    %8 = V_CMP_GT_U32_e64 32, killed %6, implicit $exec
+    %22 = COPY $exec, implicit-def $exec
+    %23 = S_AND_B64 %22, %8, implicit-def dead $scc
+    %0 = S_XOR_B64 %23, %22, implicit-def dead $scc
+    $exec = S_MOV_B64_term killed %23
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
   
   bb.1:
@@ -56,18 +56,18 @@
     %13 = S_MOV_B32 61440
     %14 = S_MOV_B32 -1
     %15 = REG_SEQUENCE undef %12, 1, undef %10, 2, killed %14, 3, killed %13, 4
-    %19 = COPY %exec
-    %exec = S_MOV_B64 -1
-    %16 = BUFFER_LOAD_DWORD_OFFSET %15, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4)
-    %17 = V_ADD_F32_e32 1065353216, killed %16, implicit %exec
-    %exec = EXIT_WWM killed %19
-    %21 = V_MOV_B32_e32 1, implicit %exec
-    early-clobber %18 = WWM killed %17, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %18, killed %15, 0, 0, 0, 0, 0, implicit %exec :: (store 4)
+    %19 = COPY $exec
+    $exec = S_MOV_B64 -1
+    %16 = BUFFER_LOAD_DWORD_OFFSET %15, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4)
+    %17 = V_ADD_F32_e32 1065353216, killed %16, implicit $exec
+    $exec = EXIT_WWM killed %19
+    %21 = V_MOV_B32_e32 1, implicit $exec
+    early-clobber %18 = WWM killed %17, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed %18, killed %15, 0, 0, 0, 0, 0, implicit $exec :: (store 4)
   
   bb.2:
-    %exec = S_OR_B64 %exec, killed %0, implicit-def %scc
-    %vgpr0 = COPY killed %21
-    SI_RETURN_TO_EPILOG killed %vgpr0
+    $exec = S_OR_B64 $exec, killed %0, implicit-def $scc
+    $vgpr0 = COPY killed %21
+    SI_RETURN_TO_EPILOG killed $vgpr0
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir b/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir
index 7154724..c629798 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir
+++ b/llvm/test/CodeGen/AMDGPU/flat-load-clustering.mir
@@ -46,32 +46,32 @@
   - { id: 12, class: vreg_64 }
   - { id: 13, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%0' }
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%1' }
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%1' }
 body:             |
   bb.0.bb:
-    liveins: %vgpr0, %sgpr4_sgpr5
+    liveins: $vgpr0, $sgpr4_sgpr5
 
-    %1 = COPY %sgpr4_sgpr5
-    %0 = COPY %vgpr0
+    %1 = COPY $sgpr4_sgpr5
+    %0 = COPY $vgpr0
     %3 = S_LOAD_DWORDX2_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %4 = S_LOAD_DWORDX2_IMM %1, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %7 = V_LSHLREV_B32_e32 2, %0, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    undef %12.sub0 = V_ADD_I32_e32 %4.sub0, %7, implicit-def %vcc, implicit %exec
+    %7 = V_LSHLREV_B32_e32 2, %0, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    undef %12.sub0 = V_ADD_I32_e32 %4.sub0, %7, implicit-def $vcc, implicit $exec
     %11 = COPY %4.sub1
-    %12.sub1 = V_ADDC_U32_e32 %11, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec
-    %5 = FLAT_LOAD_DWORD %12, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.gep1)
-    undef %9.sub0 = V_ADD_I32_e32 %3.sub0, %7, implicit-def %vcc, implicit %exec
+    %12.sub1 = V_ADDC_U32_e32 %11, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec
+    %5 = FLAT_LOAD_DWORD %12, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.gep1)
+    undef %9.sub0 = V_ADD_I32_e32 %3.sub0, %7, implicit-def $vcc, implicit $exec
     %8 = COPY %3.sub1
-    %9.sub1 = V_ADDC_U32_e32 %8, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec
-    undef %13.sub0 = V_ADD_I32_e32 16, %12.sub0, implicit-def %vcc, implicit %exec
-    %13.sub1 = V_ADDC_U32_e32 %12.sub1, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec
-    %6 = FLAT_LOAD_DWORD %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.gep34)
-    undef %10.sub0 = V_ADD_I32_e32 16, %9.sub0, implicit-def %vcc, implicit %exec
-    %10.sub1 = V_ADDC_U32_e32 %9.sub1, %2, implicit-def dead %vcc, implicit killed %vcc, implicit %exec
-    FLAT_STORE_DWORD %9, %5, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.gep2)
-    FLAT_STORE_DWORD %10, %6, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.gep4)
+    %9.sub1 = V_ADDC_U32_e32 %8, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec
+    undef %13.sub0 = V_ADD_I32_e32 16, %12.sub0, implicit-def $vcc, implicit $exec
+    %13.sub1 = V_ADDC_U32_e32 %12.sub1, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec
+    %6 = FLAT_LOAD_DWORD %13, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.gep34)
+    undef %10.sub0 = V_ADD_I32_e32 16, %9.sub0, implicit-def $vcc, implicit $exec
+    %10.sub1 = V_ADDC_U32_e32 %9.sub1, %2, implicit-def dead $vcc, implicit killed $vcc, implicit $exec
+    FLAT_STORE_DWORD %9, %5, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.gep2)
+    FLAT_STORE_DWORD %10, %6, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.gep4)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/fold-cndmask.mir b/llvm/test/CodeGen/AMDGPU/fold-cndmask.mir
index 1ddb02a..ae7bdbe 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-cndmask.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-cndmask.mir
@@ -1,10 +1,10 @@
 # RUN: llc -march=amdgcn -run-pass si-fold-operands -verify-machineinstrs -o - %s | FileCheck %s
 
-# CHECK: %1:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# CHECK: %2:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# CHECK: %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# CHECK: %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 # CHECK: %4:vgpr_32 = COPY %3
-# CHECK: %5:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# CHECK: %6:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# CHECK: %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# CHECK: %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 # CHECK: %7:vgpr_32 = COPY %3
 
 ---
@@ -22,13 +22,13 @@
 body:             |
   bb.0.entry:
     %0 = IMPLICIT_DEF
-    %1 = V_CNDMASK_B32_e64 0, 0, %0, implicit %exec
-    %2 = V_CNDMASK_B32_e64 %1, %1, %0, implicit %exec
+    %1 = V_CNDMASK_B32_e64 0, 0, %0, implicit $exec
+    %2 = V_CNDMASK_B32_e64 %1, %1, %0, implicit $exec
     %3 = IMPLICIT_DEF
-    %4 = V_CNDMASK_B32_e64 %3, %3, %0, implicit %exec
+    %4 = V_CNDMASK_B32_e64 %3, %3, %0, implicit $exec
     %5 = COPY %1
-    %6 = V_CNDMASK_B32_e64 %5, 0, %0, implicit %exec
-    %vcc = IMPLICIT_DEF
-    %7 = V_CNDMASK_B32_e32 %3, %3, implicit %exec, implicit %vcc
+    %6 = V_CNDMASK_B32_e64 %5, 0, %0, implicit $exec
+    $vcc = IMPLICIT_DEF
+    %7 = V_CNDMASK_B32_e32 %3, %3, implicit $exec, implicit $vcc
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir b/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
index cae8ed8..412190a 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-imm-f16-f32.mir
@@ -111,7 +111,7 @@
 #  literal constant.
 
 # CHECK-LABEL: name: add_f32_1.0_one_f16_use
-# CHECK: %13:vgpr_32 = V_ADD_F16_e32  1065353216, killed %11, implicit %exec
+# CHECK: %13:vgpr_32 = V_ADD_F16_e32  1065353216, killed %11, implicit $exec
 
 name:            add_f32_1.0_one_f16_use
 alignment:       0
@@ -158,10 +158,10 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = V_MOV_B32_e32 1065353216, implicit %exec
-    %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = V_MOV_B32_e32 1065353216, implicit $exec
+    %13 = V_ADD_F16_e64 0, killed %11, 0, %12, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %13, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -170,9 +170,9 @@
 # operands
 
 # CHECK-LABEL: name: add_f32_1.0_multi_f16_use
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1065353216, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 killed %11, %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 killed %12, killed %13, implicit $exec
 
 
 name:            add_f32_1.0_multi_f16_use
@@ -222,13 +222,13 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 1065353216, implicit %exec
-    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 1065353216, implicit $exec
+    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -238,8 +238,8 @@
 #  immediate, and folded into the single f16 use as a literal constant
 
 # CHECK-LABEL: name: add_f32_1.0_one_f32_use_one_f16_use
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit %exec
-# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit %exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1065353216, %11, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec
 
 name:            add_f32_1.0_one_f32_use_one_f16_use
 alignment:       0
@@ -289,14 +289,14 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %14 = V_MOV_B32_e32 1065353216, implicit %exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec
-    %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %14 = V_MOV_B32_e32 1065353216, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
+    %16 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -306,10 +306,10 @@
 #  constant, and not folded as a multi-use literal for the f16 cases
 
 # CHECK-LABEL: name: add_f32_1.0_one_f32_use_multi_f16_use
-# CHECK: %14:vgpr_32 = V_MOV_B32_e32 1065353216, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32  %11, %14, implicit %exec
-# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12,  %14, implicit %exec
-# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit %exec
+# CHECK: %14:vgpr_32 = V_MOV_B32_e32 1065353216, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32  %11, %14, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F16_e32 %12,  %14, implicit $exec
+# CHECK: %17:vgpr_32 = V_ADD_F32_e32 1065353216, killed %13, implicit $exec
 
 name:            add_f32_1.0_one_f32_use_multi_f16_use
 alignment:       0
@@ -360,24 +360,24 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %14 = V_MOV_B32_e32 1065353216, implicit %exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec
-    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit %exec
-    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %14 = V_MOV_B32_e32 1065353216, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
+    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec
+    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: add_i32_1_multi_f16_use
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 1, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 1, killed %11, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 1, killed %12, implicit $exec
 
 
 name:            add_i32_1_multi_f16_use
@@ -427,23 +427,23 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 1, implicit %exec
-    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 1, implicit $exec
+    %14 = V_ADD_F16_e64 0, killed %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, killed %12, 0, killed %13, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 
 # CHECK-LABEL: name: add_i32_m2_one_f32_use_multi_f16_use
-# CHECK: %14:vgpr_32 = V_MOV_B32_e32 -2, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit %exec
-# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit %exec
-# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit %exec
+# CHECK: %14:vgpr_32 = V_MOV_B32_e32 -2, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 -2, %11, implicit $exec
+# CHECK: %16:vgpr_32 = V_ADD_F16_e32 -2, %12, implicit $exec
+# CHECK: %17:vgpr_32 = V_ADD_F32_e32 -2, killed %13, implicit $exec
 
 name:            add_i32_m2_one_f32_use_multi_f16_use
 alignment:       0
@@ -494,16 +494,16 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %14 = V_MOV_B32_e32 -2, implicit %exec
-    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit %exec
-    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit %exec
-    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %14 = V_MOV_B32_e32 -2, implicit $exec
+    %15 = V_ADD_F16_e64 0, %11, 0, %14, 0, 0, implicit $exec
+    %16 = V_ADD_F16_e64 0, %12, 0, %14, 0, 0, implicit $exec
+    %17 = V_ADD_F32_e64 0, killed %13, 0, killed %14, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %16, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %17, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -513,9 +513,9 @@
 #  constant, and not folded as a multi-use literal for the f16 cases
 
 # CHECK-LABEL: name: add_f16_1.0_multi_f32_use
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 15360, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 15360, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F32_e32 %12, %13, implicit $exec
 
 name:            add_f16_1.0_multi_f32_use
 alignment:       0
@@ -564,13 +564,13 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 15360, implicit %exec
-    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
-    BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 15360, implicit $exec
+    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F32_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    BUFFER_STORE_DWORD_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -580,9 +580,9 @@
 # FIXME: Should be able to fold this
 
 # CHECK-LABEL: name: add_f16_1.0_other_high_bits_multi_f16_use
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 80886784, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 80886784, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F16_e32 %11, %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec
 
 name:            add_f16_1.0_other_high_bits_multi_f16_use
 alignment:       0
@@ -631,13 +631,13 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 80886784, implicit %exec
-    %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit %exec
-    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 80886784, implicit $exec
+    %14 = V_ADD_F16_e64 0, %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    BUFFER_STORE_SHORT_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
@@ -647,9 +647,9 @@
 # f32 instruction.
 
 # CHECK-LABEL: name: add_f16_1.0_other_high_bits_use_f16_f32
-# CHECK: %13:vgpr_32 = V_MOV_B32_e32 305413120, implicit %exec
-# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit %exec
-# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit %exec
+# CHECK: %13:vgpr_32 = V_MOV_B32_e32 305413120, implicit $exec
+# CHECK: %14:vgpr_32 = V_ADD_F32_e32 %11, %13, implicit $exec
+# CHECK: %15:vgpr_32 = V_ADD_F16_e32 %12, %13, implicit $exec
 name:            add_f16_1.0_other_high_bits_use_f16_f32
 alignment:       0
 exposesReturnsTwice: false
@@ -697,13 +697,13 @@
     %8 = S_MOV_B32 61440
     %9 = S_MOV_B32 -1
     %10 = REG_SEQUENCE killed %7, 1, killed %5, 2, killed %9, 3, killed %8, 4
-    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 2 from `half addrspace(1)* undef`)
-    %13 = V_MOV_B32_e32 305413120, implicit %exec
-    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit %exec
-    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `float addrspace(1)* undef`)
-    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 2 into `half addrspace(1)* undef`)
+    %11 = BUFFER_LOAD_DWORD_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    %12 = BUFFER_LOAD_USHORT_OFFSET %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 2 from `half addrspace(1)* undef`)
+    %13 = V_MOV_B32_e32 305413120, implicit $exec
+    %14 = V_ADD_F32_e64 0, %11, 0, %13, 0, 0, implicit $exec
+    %15 = V_ADD_F16_e64 0, %12, 0, %13, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed %14, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `float addrspace(1)* undef`)
+    BUFFER_STORE_SHORT_OFFSET killed %15, %10, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 2 into `half addrspace(1)* undef`)
     S_ENDPGM
 
 ...
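
Every hunk in the files below applies the same mechanical rewrite, so it is worth spelling out once which tokens move to '$' and which keep '%'. The following standalone snippet is illustrative only (it is not part of the patch; the opcode, register class, and RUN line assume the usual MIR round-trip harness):

# RUN: llc -march=amdgcn -run-pass none -o - %s | FileCheck %s
#
# '$' now marks physical registers and architected units, e.g.
#   $vgpr0, $sgpr0_sgpr1, $exec, $vcc, $vcc_lo, $scc, $m0, $flat_scr, $noreg
# '%' remains on virtual registers and on non-register entities, e.g.
#   %0, %13:vgpr_32, %bb.1, %ir.const0, %ir-block.0, %stack.0
#
# CHECK: %0:vgpr_32 = COPY $vgpr0
# CHECK: $vgpr0 = V_MOV_B32_e32 0, implicit $exec
---
name: sigil_summary
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $vgpr0
    %0:vgpr_32 = COPY $vgpr0
    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
    S_ENDPGM
...
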
diff --git a/llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir b/llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
index 9831538..d37964a 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-immediate-output-mods.mir
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -run-pass peephole-opt -verify-machineinstrs %s -o - | FileCheck -check-prefix=GCN %s
 ...
 # GCN-LABEL: name: no_fold_imm_madak_mac_clamp_f32
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec
-# GCN-NEXT: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
+# GCN-NEXT: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
 
 name:            no_fold_imm_madak_mac_clamp_f32
 tracksRegLiveness: true
@@ -38,42 +38,42 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
     %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
-    %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %28 = REG_SEQUENCE %3, 1, %27, 2
     %11 = S_MOV_B32 61440
     %12 = S_MOV_B32 0
     %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
     %14 = REG_SEQUENCE killed %5, 17, %13, 18
     %15 = S_MOV_B32 2
-    %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+    %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec
     %17 = REG_SEQUENCE killed %6, 17, %13, 18
     %18 = REG_SEQUENCE killed %4, 17, %13, 18
     %20 = COPY %29
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec
     %22 = COPY %29
-    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
-    %23 = V_MOV_B32_e32 1090519040, implicit %exec
-    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec
+    %23 = V_MOV_B32_e32 1090519040, implicit $exec
+    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
     %26 = COPY %29
-    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: no_fold_imm_madak_mac_omod_f32
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec
-# GCN: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
+# GCN: %24:vgpr_32 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $exec
 
 name:            no_fold_imm_madak_mac_omod_f32
 tracksRegLiveness: true
@@ -109,42 +109,42 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
     %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
-    %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %28 = REG_SEQUENCE %3, 1, %27, 2
     %11 = S_MOV_B32 61440
     %12 = S_MOV_B32 0
     %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
     %14 = REG_SEQUENCE killed %5, 17, %13, 18
     %15 = S_MOV_B32 2
-    %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+    %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec
     %17 = REG_SEQUENCE killed %6, 17, %13, 18
     %18 = REG_SEQUENCE killed %4, 17, %13, 18
     %20 = COPY %29
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec
     %22 = COPY %29
-    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
-    %23 = V_MOV_B32_e32 1090519040, implicit %exec
-    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit %exec
+    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec
+    %23 = V_MOV_B32_e32 1090519040, implicit $exec
+    %24 = V_MAC_F32_e64 0, killed %19, 0, killed %21, 0, %23, 0, 2, implicit $exec
     %26 = COPY %29
-    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN: name: no_fold_imm_madak_mad_clamp_f32
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec
-# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
+# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
 
 name:            no_fold_imm_madak_mad_clamp_f32
 tracksRegLiveness: true
@@ -180,42 +180,42 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
     %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
-    %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %28 = REG_SEQUENCE %3, 1, %27, 2
     %11 = S_MOV_B32 61440
     %12 = S_MOV_B32 0
     %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
     %14 = REG_SEQUENCE killed %5, 17, %13, 18
     %15 = S_MOV_B32 2
-    %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+    %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec
     %17 = REG_SEQUENCE killed %6, 17, %13, 18
     %18 = REG_SEQUENCE killed %4, 17, %13, 18
     %20 = COPY %29
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec
     %22 = COPY %29
-    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
-    %23 = V_MOV_B32_e32 1090519040, implicit %exec
-    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit %exec
+    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec
+    %23 = V_MOV_B32_e32 1090519040, implicit $exec
+    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 1, 0, implicit $exec
     %26 = COPY %29
-    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN: name: no_fold_imm_madak_mad_omod_f32
-# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit %exec
-# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit %exec
+# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
+# GCN: %24:vgpr_32 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $exec
 
 name:            no_fold_imm_madak_mad_omod_f32
 tracksRegLiveness: true
@@ -251,35 +251,35 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
     %6 = S_LOAD_DWORDX2_IMM %0, 13, 0
-    %27 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %27 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %28 = REG_SEQUENCE %3, 1, %27, 2
     %11 = S_MOV_B32 61440
     %12 = S_MOV_B32 0
     %13 = REG_SEQUENCE killed %12, 1, killed %11, 2
     %14 = REG_SEQUENCE killed %5, 17, %13, 18
     %15 = S_MOV_B32 2
-    %29 = V_LSHL_B64 killed %28, killed %15, implicit %exec
+    %29 = V_LSHL_B64 killed %28, killed %15, implicit $exec
     %17 = REG_SEQUENCE killed %6, 17, %13, 18
     %18 = REG_SEQUENCE killed %4, 17, %13, 18
     %20 = COPY %29
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit %exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %20, killed %14, 0, 0, 0, 0, 0, implicit $exec
     %22 = COPY %29
-    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit %exec
-    %23 = V_MOV_B32_e32 1090519040, implicit %exec
-    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit %exec
+    %21 = BUFFER_LOAD_DWORD_ADDR64 %22, killed %17, 0, 0, 0, 0, 0, implicit $exec
+    %23 = V_MOV_B32_e32 1090519040, implicit $exec
+    %24 = V_MAD_F32 0, killed %19, 0, killed %21, 0, %23, 0, 1, implicit $exec
     %26 = COPY %29
-    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %24, %26, killed %18, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
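
A note on the FileCheck side of these updates: '$' and '%' are both literal text to FileCheck (only {{...}} and [[...]] patterns are special), so the new physical sigil needs no escaping and can sit next to virtual-register ids in a single directive, as in the checks above:

# GCN: %23:vgpr_32 = V_MOV_B32_e32 1090519040, implicit $exec
#      (the virtual def keeps '%'; the implicit operand is the physical $exec)
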
diff --git a/llvm/test/CodeGen/AMDGPU/fold-multiple.mir b/llvm/test/CodeGen/AMDGPU/fold-multiple.mir
index b9b6ee6..fa84f92 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-multiple.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-multiple.mir
@@ -14,8 +14,8 @@
 # being processed twice.
 
 # CHECK-LABEL: name: test
-# CHECK: %2:vgpr_32 = V_LSHLREV_B32_e32 2, killed %0, implicit %exec
-# CHECK: %4:vgpr_32 = V_AND_B32_e32 8, killed %2, implicit %exec
+# CHECK: %2:vgpr_32 = V_LSHLREV_B32_e32 2, killed %0, implicit $exec
+# CHECK: %4:vgpr_32 = V_AND_B32_e32 8, killed %2, implicit $exec
 
 name:            test
 tracksRegLiveness: true
@@ -30,11 +30,11 @@
   bb.0 (%ir-block.0):
     %0 = IMPLICIT_DEF
     %1 = S_MOV_B32 2
-    %2 = V_LSHLREV_B32_e64 %1, killed %0, implicit %exec
-    %3 = S_LSHL_B32 %1, killed %1, implicit-def dead %scc
-    %4 = V_AND_B32_e64 killed %2, killed %3, implicit %exec
+    %2 = V_LSHLREV_B32_e64 %1, killed %0, implicit $exec
+    %3 = S_LSHL_B32 %1, killed %1, implicit-def dead $scc
+    %4 = V_AND_B32_e64 killed %2, killed %3, implicit $exec
     %5 = IMPLICIT_DEF
-    BUFFER_STORE_DWORD_OFFSET killed %4, killed %5, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET killed %4, killed %5, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/fold-operands-order.mir b/llvm/test/CodeGen/AMDGPU/fold-operands-order.mir
index 3f28f39..a4ded7f 100644
--- a/llvm/test/CodeGen/AMDGPU/fold-operands-order.mir
+++ b/llvm/test/CodeGen/AMDGPU/fold-operands-order.mir
@@ -6,10 +6,10 @@
 # aren't made in users before the def is seen.
 
 # GCN-LABEL: name: mov_in_use_list_2x{{$}}
-# GCN: %2:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# GCN: %2:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 # GCN-NEXT: %3:vgpr_32 = COPY undef %0
 
-# GCN: %1:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
+# GCN: %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
 
 
 name: mov_in_use_list_2x
@@ -30,12 +30,12 @@
     successors: %bb.2
 
     %2 = COPY %1
-    %3 = V_XOR_B32_e64 killed %2, undef %0, implicit %exec
+    %3 = V_XOR_B32_e64 killed %2, undef %0, implicit $exec
 
   bb.2:
     successors: %bb.1
 
-    %1 = V_MOV_B32_e32 0, implicit %exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
     S_BRANCH %bb.1
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/hazard-inlineasm.mir b/llvm/test/CodeGen/AMDGPU/hazard-inlineasm.mir
index 6f09bb8..611c276 100644
--- a/llvm/test/CodeGen/AMDGPU/hazard-inlineasm.mir
+++ b/llvm/test/CodeGen/AMDGPU/hazard-inlineasm.mir
@@ -16,8 +16,8 @@
 
 body: |
   bb.0:
-   FLAT_STORE_DWORDX4 %vgpr49_vgpr50, %vgpr26_vgpr27_vgpr28_vgpr29, 0, 0, 0, implicit %exec, implicit %flat_scr
-   INLINEASM &"v_mad_u64_u32 $0, $1, $2, $3, $4", 0, 2621450, def %vgpr26_vgpr27, 2818058, def dead %sgpr14_sgpr15, 589833, %sgpr12, 327689, killed %vgpr51, 2621449, %vgpr46_vgpr47
+   FLAT_STORE_DWORDX4 $vgpr49_vgpr50, $vgpr26_vgpr27_vgpr28_vgpr29, 0, 0, 0, implicit $exec, implicit $flat_scr
+   INLINEASM &"v_mad_u64_u32 $0, $1, $2, $3, $4", 0, 2621450, def $vgpr26_vgpr27, 2818058, def dead $sgpr14_sgpr15, 589833, $sgpr12, 327689, killed $vgpr51, 2621449, $vgpr46_vgpr47
    S_ENDPGM
 ...
 
diff --git a/llvm/test/CodeGen/AMDGPU/hazard.mir b/llvm/test/CodeGen/AMDGPU/hazard.mir
index d0caacd..82e5c6d 100644
--- a/llvm/test/CodeGen/AMDGPU/hazard.mir
+++ b/llvm/test/CodeGen/AMDGPU/hazard.mir
@@ -3,7 +3,7 @@
 
 # GCN-LABEL: name: hazard_implicit_def
 # GCN:    bb.0.entry:
-# GCN:      %m0 = S_MOV_B32
+# GCN:      $m0 = S_MOV_B32
 # GFX9:     S_NOP 0
 # VI-NOT:   S_NOP_0
 # GCN:      V_INTERP_P1_F32
@@ -18,22 +18,22 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr7', virtual-reg: '' }
-  - { reg: '%vgpr4', virtual-reg: '' }
+  - { reg: '$sgpr7', virtual-reg: '' }
+  - { reg: '$vgpr4', virtual-reg: '' }
 body:             |
   bb.0.entry:
-    liveins: %sgpr7, %vgpr4
+    liveins: $sgpr7, $vgpr4
 
-    %m0 = S_MOV_B32 killed %sgpr7
-    %vgpr5 = IMPLICIT_DEF
-    %vgpr0 = V_INTERP_P1_F32 killed %vgpr4, 0, 0, implicit %m0, implicit %exec
-    SI_RETURN_TO_EPILOG killed %vgpr5, killed %vgpr0
+    $m0 = S_MOV_B32 killed $sgpr7
+    $vgpr5 = IMPLICIT_DEF
+    $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec
+    SI_RETURN_TO_EPILOG killed $vgpr5, killed $vgpr0
 
 ...
 
 # GCN-LABEL: name: hazard_inlineasm
 # GCN:    bb.0.entry:
-# GCN:      %m0 = S_MOV_B32
+# GCN:      $m0 = S_MOV_B32
 # GFX9:     S_NOP 0
 # VI-NOT:   S_NOP_0
 # GCN:      V_INTERP_P1_F32
@@ -47,14 +47,14 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr7', virtual-reg: '' }
-  - { reg: '%vgpr4', virtual-reg: '' }
+  - { reg: '$sgpr7', virtual-reg: '' }
+  - { reg: '$vgpr4', virtual-reg: '' }
 body:             |
   bb.0.entry:
-    liveins: %sgpr7, %vgpr4
+    liveins: $sgpr7, $vgpr4
 
-    %m0 = S_MOV_B32 killed %sgpr7
-    INLINEASM &"; no-op", 1, 327690, def %vgpr5
-    %vgpr0 = V_INTERP_P1_F32 killed %vgpr4, 0, 0, implicit %m0, implicit %exec
-    SI_RETURN_TO_EPILOG killed %vgpr5, killed %vgpr0
+    $m0 = S_MOV_B32 killed $sgpr7
+    INLINEASM &"; no-op", 1, 327690, def $vgpr5
+    $vgpr0 = V_INTERP_P1_F32 killed $vgpr4, 0, 0, implicit $m0, implicit $exec
+    SI_RETURN_TO_EPILOG killed $vgpr5, killed $vgpr0
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir b/llvm/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir
index e3a5599..8f034c3 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-skips-kill-uncond.mir
@@ -10,11 +10,11 @@
 # CHECK-LABEL: name: kill_uncond_branch
 
 # CHECK: bb.0:
-# CHECK: S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+# CHECK: S_CBRANCH_VCCNZ %bb.1, implicit $vcc
 
 # CHECK: bb.1:
 # CHECK: V_CMPX_LE_F32_e32
-# CHECK-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit %exec
+# CHECK-NEXT: S_CBRANCH_EXECNZ %bb.2, implicit $exec
 
 # CHECK: bb.3:
 # CHECK-NEXT: EXP_DONE
@@ -28,12 +28,12 @@
 body: |
   bb.0:
     successors: %bb.1
-    S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit $vcc
 
   bb.1:
     successors: %bb.2
-    %vgpr0 = V_MOV_B32_e32 0, implicit %exec
-    SI_KILL_F32_COND_IMM_TERMINATOR %vgpr0, 0, 3, implicit-def %exec, implicit-def %vcc, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+    SI_KILL_F32_COND_IMM_TERMINATOR $vgpr0, 0, 3, implicit-def $exec, implicit-def $vcc, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
diff --git a/llvm/test/CodeGen/AMDGPU/insert-waits-callee.mir b/llvm/test/CodeGen/AMDGPU/insert-waits-callee.mir
index ad7cd0c..2bdaf59 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-waits-callee.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-waits-callee.mir
@@ -13,13 +13,13 @@
 # CHECK-NEXT: V_ADD_F32
 # CHECK-NEXT: S_SETPC_B64
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
-  - { reg: '%vgpr0' }
+  - { reg: '$sgpr0_sgpr1' }
+  - { reg: '$vgpr0' }
 
 name: entry_callee_wait
 body:             |
   bb.0:
-    %vgpr0 = V_ADD_F32_e32 %vgpr0, %vgpr0, implicit %exec
-    S_SETPC_B64 killed %sgpr0_sgpr1
+    $vgpr0 = V_ADD_F32_e32 $vgpr0, $vgpr0, implicit $exec
+    S_SETPC_B64 killed $sgpr0_sgpr1
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/insert-waits-exp.mir b/llvm/test/CodeGen/AMDGPU/insert-waits-exp.mir
index 1055201..42af185 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-waits-exp.mir
+++ b/llvm/test/CodeGen/AMDGPU/insert-waits-exp.mir
@@ -20,10 +20,10 @@
 # CHECK-LABEL: name: exp_done_waitcnt{{$}}
 # CHECK: EXP_DONE
 # CHECK-NEXT: S_WAITCNT 3855
-# CHECK: %vgpr0 = V_MOV_B32
-# CHECK: %vgpr1 = V_MOV_B32
-# CHECK: %vgpr2 = V_MOV_B32
-# CHECK: %vgpr3 = V_MOV_B32
+# CHECK: $vgpr0 = V_MOV_B32
+# CHECK: $vgpr1 = V_MOV_B32
+# CHECK: $vgpr2 = V_MOV_B32
+# CHECK: $vgpr3 = V_MOV_B32
 name:            exp_done_waitcnt
 alignment:       0
 exposesReturnsTwice: false
@@ -47,17 +47,17 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0 (%ir-block.2):
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %vgpr1 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %vgpr2 = BUFFER_LOAD_DWORD_OFFSET %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    %vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `float addrspace(1)* undef`)
-    EXP_DONE 0, killed %vgpr0, killed %vgpr1, killed %vgpr2, killed %vgpr3, -1, -1, 15, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 1056964608, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 1065353216, implicit %exec
-    %vgpr2 = V_MOV_B32_e32 1073741824, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 1082130432, implicit %exec
-    SI_RETURN_TO_EPILOG killed %vgpr0, killed %vgpr1, killed %vgpr2, killed %vgpr3
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    $vgpr1 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    $vgpr2 = BUFFER_LOAD_DWORD_OFFSET $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    $vgpr3 = BUFFER_LOAD_DWORD_OFFSET killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `float addrspace(1)* undef`)
+    EXP_DONE 0, killed $vgpr0, killed $vgpr1, killed $vgpr2, killed $vgpr3, -1, -1, 15, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 1056964608, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 1065353216, implicit $exec
+    $vgpr2 = V_MOV_B32_e32 1073741824, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 1082130432, implicit $exec
+    SI_RETURN_TO_EPILOG killed $vgpr0, killed $vgpr1, killed $vgpr2, killed $vgpr3
 
 ...
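
The S_WAITCNT immediates expected here (and in the memory-legalizer tests further down) are packed counter fields. Assuming the SI/VI layout (vmcnt in bits [3:0], expcnt in bits [6:4], lgkmcnt starting at bit 8, with a saturated field meaning "do not wait on this counter"), the constants used in these tests decode as:

#   S_WAITCNT 3855   ; 0xf0f -> expcnt(0):  exports done before $vgpr0-$vgpr3 are reused
#   S_WAITCNT 127    ; 0x07f -> lgkmcnt(0): scalar/LDS accesses complete
#   S_WAITCNT 3952   ; 0xf70 -> vmcnt(0):   VMEM loads complete
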
diff --git a/llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir b/llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir
index 698f2c3..d4f737e 100644
--- a/llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir
+++ b/llvm/test/CodeGen/AMDGPU/inserted-wait-states.mir
@@ -78,23 +78,23 @@
 
 body: |
   bb.0:
-    %vcc = S_MOV_B64 0
-    %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
+    $vcc = S_MOV_B64 0
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    implicit %vcc = V_CMP_EQ_I32_e32 %vgpr1, %vgpr2, implicit %exec
-    %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
+    implicit $vcc = V_CMP_EQ_I32_e32 $vgpr1, $vgpr2, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
-    %vcc = V_CMP_EQ_I32_e64 %vgpr1, %vgpr2, implicit %exec
-    %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
+    $vcc = V_CMP_EQ_I32_e64 $vgpr1, $vgpr2, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
-    %vgpr4, %vcc = V_DIV_SCALE_F32 %vgpr1, %vgpr1, %vgpr3, implicit %exec
-    %vgpr0 = V_DIV_FMAS_F32 0, %vgpr1, 0, %vgpr2, 0, %vgpr3, 0, 0, implicit %vcc, implicit %exec
+    $vgpr4, $vcc = V_DIV_SCALE_F32 $vgpr1, $vgpr1, $vgpr3, implicit $exec
+    $vgpr0 = V_DIV_FMAS_F32 0, $vgpr1, 0, $vgpr2, 0, $vgpr3, 0, 0, implicit $vcc, implicit $exec
     S_ENDPGM
 
 ...
@@ -128,24 +128,24 @@
 
 body: |
   bb.0:
-    S_SETREG_B32 %sgpr0, 1
-    %sgpr1 = S_GETREG_B32 1
+    S_SETREG_B32 $sgpr0, 1
+    $sgpr1 = S_GETREG_B32 1
     S_BRANCH %bb.1
 
   bb.1:
     S_SETREG_IMM32_B32 0, 1
-    %sgpr1 = S_GETREG_B32 1
+    $sgpr1 = S_GETREG_B32 1
     S_BRANCH %bb.2
 
   bb.2:
-    S_SETREG_B32 %sgpr0, 1
-    %sgpr1 = S_MOV_B32 0
-    %sgpr2 = S_GETREG_B32 1
+    S_SETREG_B32 $sgpr0, 1
+    $sgpr1 = S_MOV_B32 0
+    $sgpr2 = S_GETREG_B32 1
     S_BRANCH %bb.3
 
   bb.3:
-    S_SETREG_B32 %sgpr0, 0
-    %sgpr1 = S_GETREG_B32 1
+    S_SETREG_B32 $sgpr0, 0
+    $sgpr1 = S_GETREG_B32 1
     S_ENDPGM
 ...
 
@@ -173,18 +173,18 @@
 
 body: |
   bb.0:
-    S_SETREG_B32 %sgpr0, 1
-    S_SETREG_B32 %sgpr1, 1
+    S_SETREG_B32 $sgpr0, 1
+    S_SETREG_B32 $sgpr1, 1
     S_BRANCH %bb.1
 
   bb.1:
-    S_SETREG_B32 %sgpr0, 64
-    S_SETREG_B32 %sgpr1, 128
+    S_SETREG_B32 $sgpr0, 64
+    S_SETREG_B32 $sgpr1, 128
     S_BRANCH %bb.2
 
   bb.2:
-    S_SETREG_B32 %sgpr0, 1
-    S_SETREG_B32 %sgpr1, 0
+    S_SETREG_B32 $sgpr0, 1
+    S_SETREG_B32 $sgpr1, 0
     S_ENDPGM
 ...
 
@@ -230,33 +230,33 @@
 
 body: |
   bb.0:
-    BUFFER_STORE_DWORD_OFFSET %vgpr3, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_DWORDX3_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_DWORDX4_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_DWORDX4_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_FORMAT_XYZ_OFFSET %vgpr2_vgpr3_vgpr4, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_STORE_FORMAT_XYZW_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    BUFFER_ATOMIC_CMPSWAP_X2_OFFSET %vgpr2_vgpr3_vgpr4_vgpr5, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, implicit %exec
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFSET $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_DWORDX3_OFFSET $vgpr2_vgpr3_vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_DWORDX4_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_DWORDX4_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_FORMAT_XYZ_OFFSET $vgpr2_vgpr3_vgpr4, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_STORE_FORMAT_XYZW_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    BUFFER_ATOMIC_CMPSWAP_X2_OFFSET $vgpr2_vgpr3_vgpr4_vgpr5, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, implicit $exec
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %vgpr2_vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    FLAT_STORE_DWORDX4 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    FLAT_ATOMIC_CMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
-    FLAT_ATOMIC_FCMPSWAP_X2 %vgpr0_vgpr1, %vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3 = V_MOV_B32_e32 0, implicit %exec
+    FLAT_STORE_DWORDX2 $vgpr0_vgpr1, $vgpr2_vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    FLAT_STORE_DWORDX3 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    FLAT_STORE_DWORDX4 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    FLAT_ATOMIC_CMPSWAP_X2 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
+    FLAT_ATOMIC_FCMPSWAP_X2 $vgpr0_vgpr1, $vgpr2_vgpr3_vgpr4_vgpr5, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3 = V_MOV_B32_e32 0, implicit $exec
     S_ENDPGM
 
 ...
@@ -302,23 +302,23 @@
 
 body: |
   bb.0:
-    %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
-    %sgpr4 = V_READLANE_B32 %vgpr4, %sgpr0
+    $vgpr0,$sgpr0_sgpr1 = V_ADD_I32_e64 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $sgpr4 = V_READLANE_B32 $vgpr4, $sgpr0
     S_BRANCH %bb.1
 
   bb.1:
-    %vgpr0,%sgpr0_sgpr1 = V_ADD_I32_e64 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
-    %vgpr4 = V_WRITELANE_B32 %sgpr0, %sgpr0
+    $vgpr0,$sgpr0_sgpr1 = V_ADD_I32_e64 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $vgpr4 = V_WRITELANE_B32 $sgpr0, $sgpr0
     S_BRANCH %bb.2
 
   bb.2:
-    %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
-    %sgpr4 = V_READLANE_B32 %vgpr4, %vcc_lo
+    $vgpr0,implicit $vcc = V_ADD_I32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $sgpr4 = V_READLANE_B32 $vgpr4, $vcc_lo
     S_BRANCH %bb.3
 
   bb.3:
-    %vgpr0,implicit %vcc = V_ADD_I32_e32 %vgpr1, %vgpr2, implicit %vcc, implicit %exec
-    %vgpr4 = V_WRITELANE_B32 %sgpr4, %vcc_lo
+    $vgpr0,implicit $vcc = V_ADD_I32_e32 $vgpr1, $vgpr2, implicit $vcc, implicit $exec
+    $vgpr4 = V_WRITELANE_B32 $sgpr4, $vcc_lo
     S_ENDPGM
 
 ...
@@ -341,13 +341,13 @@
 
 body: |
   bb.0:
-    S_SETREG_B32 %sgpr0, 3
-    S_RFE_B64 %sgpr2_sgpr3
+    S_SETREG_B32 $sgpr0, 3
+    S_RFE_B64 $sgpr2_sgpr3
     S_BRANCH %bb.1
 
   bb.1:
-    S_SETREG_B32 %sgpr0, 0
-    S_RFE_B64 %sgpr2_sgpr3
+    S_SETREG_B32 $sgpr0, 0
+    S_RFE_B64 $sgpr2_sgpr3
     S_ENDPGM
 
 ...
@@ -370,13 +370,13 @@
 
 body: |
   bb.0:
-    %sgpr0 = S_MOV_FED_B32 %sgpr0
-    %sgpr0 = S_MOV_B32 %sgpr0
+    $sgpr0 = S_MOV_FED_B32 $sgpr0
+    $sgpr0 = S_MOV_B32 $sgpr0
     S_BRANCH %bb.1
 
   bb.1:
-    %sgpr0 = S_MOV_FED_B32 %sgpr0
-    %vgpr0 = V_MOV_B32_e32 %sgpr0, implicit %exec
+    $sgpr0 = S_MOV_FED_B32 $sgpr0
+    $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec
     S_ENDPGM
 
 ...
@@ -410,23 +410,23 @@
 
 body: |
   bb.0:
-    %m0 = S_MOV_B32 0
-    %sgpr0 = S_MOVRELS_B32 %sgpr0, implicit %m0
+    $m0 = S_MOV_B32 0
+    $sgpr0 = S_MOVRELS_B32 $sgpr0, implicit $m0
     S_BRANCH %bb.1
 
   bb.1:
-    %m0 = S_MOV_B32 0
-    %sgpr0_sgpr1 = S_MOVRELS_B64 %sgpr0_sgpr1, implicit %m0
+    $m0 = S_MOV_B32 0
+    $sgpr0_sgpr1 = S_MOVRELS_B64 $sgpr0_sgpr1, implicit $m0
     S_BRANCH %bb.2
 
   bb.2:
-    %m0 = S_MOV_B32 0
-    %sgpr0 = S_MOVRELD_B32 %sgpr0, implicit %m0
+    $m0 = S_MOV_B32 0
+    $sgpr0 = S_MOVRELD_B32 $sgpr0, implicit $m0
     S_BRANCH %bb.3
 
   bb.3:
-    %m0 = S_MOV_B32 0
-    %sgpr0_sgpr1 = S_MOVRELD_B64 %sgpr0_sgpr1, implicit %m0
+    $m0 = S_MOV_B32 0
+    $sgpr0_sgpr1 = S_MOVRELD_B64 $sgpr0_sgpr1, implicit $m0
     S_ENDPGM
 ...
 
@@ -459,23 +459,23 @@
 
 body: |
   bb.0:
-    %m0 = S_MOV_B32 0
-    %vgpr0 = V_INTERP_P1_F32 %vgpr0, 0, 0, implicit %m0, implicit %exec
+    $m0 = S_MOV_B32 0
+    $vgpr0 = V_INTERP_P1_F32 $vgpr0, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    %m0 = S_MOV_B32 0
-    %vgpr0 = V_INTERP_P2_F32 %vgpr0, %vgpr1, 0, 0, implicit %m0, implicit %exec
+    $m0 = S_MOV_B32 0
+    $vgpr0 = V_INTERP_P2_F32 $vgpr0, $vgpr1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
-    %m0 = S_MOV_B32 0
-    %vgpr0 = V_INTERP_P1_F32_16bank %vgpr0, 0, 0, implicit %m0, implicit %exec
+    $m0 = S_MOV_B32 0
+    $vgpr0 = V_INTERP_P1_F32_16bank $vgpr0, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
-    %m0 = S_MOV_B32 0
-    %vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit %m0, implicit %exec
+    $m0 = S_MOV_B32 0
+    $vgpr0 = V_INTERP_MOV_F32 0, 0, 0, implicit $m0, implicit $exec
     S_ENDPGM
 ...
 
@@ -503,13 +503,13 @@
 
 body: |
   bb.0:
-    %vgpr0 = V_MOV_B32_e32 0, implicit %exec
-    %vgpr1 = V_MOV_B32_dpp %vgpr1, %vgpr0, 0, 15, 15, 0, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
+    $vgpr1 = V_MOV_B32_dpp $vgpr1, $vgpr0, 0, 15, 15, 0, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
-    implicit %exec, implicit %vcc = V_CMPX_EQ_I32_e32 %vgpr0, %vgpr1, implicit %exec
-    %vgpr3 = V_MOV_B32_dpp %vgpr3, %vgpr0, 0, 15, 15, 0, implicit %exec
+    implicit $exec, implicit $vcc = V_CMPX_EQ_I32_e32 $vgpr0, $vgpr1, implicit $exec
+    $vgpr3 = V_MOV_B32_dpp $vgpr3, $vgpr0, 0, 15, 15, 0, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -521,10 +521,10 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr4_sgpr5' }
-  - { reg: '%sgpr6_sgpr7' }
-  - { reg: '%sgpr9' }
-  - { reg: '%sgpr0_sgpr1_sgpr2_sgpr3' }
+  - { reg: '$sgpr4_sgpr5' }
+  - { reg: '$sgpr6_sgpr7' }
+  - { reg: '$sgpr9' }
+  - { reg: '$sgpr0_sgpr1_sgpr2_sgpr3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -544,19 +544,19 @@
   - { id: 1, offset: 8, size: 4, alignment: 4 }
 body:             |
   bb.0.entry:
-    liveins: %sgpr4_sgpr5, %sgpr6_sgpr7, %sgpr9, %sgpr0_sgpr1_sgpr2_sgpr3
+    liveins: $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr9, $sgpr0_sgpr1_sgpr2_sgpr3
 
-    %flat_scr_lo = S_ADD_U32 %sgpr6, %sgpr9, implicit-def %scc
-    %flat_scr_hi = S_ADDC_U32 %sgpr7, 0, implicit-def %scc, implicit %scc
-    DBG_VALUE %noreg, 2, !5, !11, debug-location !12
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed %sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    dead %sgpr6_sgpr7 = KILL %sgpr4_sgpr5
-    %sgpr8 = S_MOV_B32 %sgpr5
-    %vgpr0 = V_MOV_B32_e32 killed %sgpr8, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr9, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.A.addr + 4)
-    %sgpr8 = S_MOV_B32 %sgpr4, implicit killed %sgpr4_sgpr5
-    %vgpr0 = V_MOV_B32_e32 killed %sgpr8, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr9, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.A.addr)
+    $flat_scr_lo = S_ADD_U32 $sgpr6, $sgpr9, implicit-def $scc
+    $flat_scr_hi = S_ADDC_U32 $sgpr7, 0, implicit-def $scc, implicit $scc
+    DBG_VALUE $noreg, 2, !5, !11, debug-location !12
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM killed $sgpr4_sgpr5, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    dead $sgpr6_sgpr7 = KILL $sgpr4_sgpr5
+    $sgpr8 = S_MOV_B32 $sgpr5
+    $vgpr0 = V_MOV_B32_e32 killed $sgpr8, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr9, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.A.addr + 4)
+    $sgpr8 = S_MOV_B32 $sgpr4, implicit killed $sgpr4_sgpr5
+    $vgpr0 = V_MOV_B32_e32 killed $sgpr8, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr9, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.A.addr)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir b/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
index 61aa39fc..3429726 100644
--- a/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
+++ b/llvm/test/CodeGen/AMDGPU/invert-br-undef-vcc.mir
@@ -26,7 +26,7 @@
 ...
 ---
 # CHECK-LABEL: name: invert_br_undef_vcc
-# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
+# CHECK: S_CBRANCH_VCCZ %bb.1, implicit undef $vcc
 
 name:            invert_br_undef_vcc
 alignment:       0
@@ -36,7 +36,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
+  - { reg: '$sgpr0_sgpr1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -53,34 +53,34 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.entry:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
 
   bb.1.else:
-    liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %vgpr0 = V_MOV_B32_e32 100, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 100, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
     S_BRANCH %bb.3
 
   bb.2.if:
-    liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %vgpr0 = V_MOV_B32_e32 9, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
-    %vgpr0 = V_MOV_B32_e32 0, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 9, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
 
   bb.3.done:
-    liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out)
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
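
With physical registers now visually distinct from virtual ones, the register-operand flags in these tests are easier to pick out. A quick glossary, using lines that appear in the hunks above (operand lists abbreviated with "..."):

#   S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc    ; use of $vcc whose value is intentionally undefined
#   BUFFER_STORE_DWORD_OFFSET killed $vgpr0, ...  ; 'killed': last use of $vgpr0
#   S_LSHL_B32 ..., implicit-def dead $scc        ; 'dead': a def that is never read
#   dead $sgpr6_sgpr7 = KILL $sgpr4_sgpr5         ; explicit def whose result is unused
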
diff --git a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
index d6b3d7b..c4a1542 100644
--- a/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
+++ b/llvm/test/CodeGen/AMDGPU/limit-coalesce.mir
@@ -11,8 +11,8 @@
 # CHECK:  - { id: 8, class: vreg_128, preferred-register: '' }
 # No more registers shall be defined
 # CHECK-NEXT: liveins:
-# CHECK:    FLAT_STORE_DWORDX2 %vgpr0_vgpr1, %4,
-# CHECK:    FLAT_STORE_DWORDX3 %vgpr0_vgpr1, %6,
+# CHECK:    FLAT_STORE_DWORDX2 $vgpr0_vgpr1, %4,
+# CHECK:    FLAT_STORE_DWORDX3 $vgpr0_vgpr1, %6,
 
 ---
 name:            main
@@ -33,7 +33,7 @@
   - { id: 8, class: vreg_128 }
   - { id: 9, class: vreg_128 }
 liveins:
-  - { reg: '%sgpr6', virtual-reg: '%1' }
+  - { reg: '$sgpr6', virtual-reg: '%1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -50,22 +50,22 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.entry:
-    liveins: %sgpr0, %vgpr0_vgpr1
+    liveins: $sgpr0, $vgpr0_vgpr1
 
     %3 = IMPLICIT_DEF
-    undef %4.sub0 = COPY %sgpr0
+    undef %4.sub0 = COPY $sgpr0
     %4.sub1 = COPY %3.sub0
     undef %5.sub0 = COPY %4.sub1
     %5.sub1 = COPY %4.sub0
-    FLAT_STORE_DWORDX2 %vgpr0_vgpr1, killed %5, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX2 $vgpr0_vgpr1, killed %5, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     %6 = IMPLICIT_DEF
     undef %7.sub0_sub1 = COPY %6
     %7.sub2 = COPY %3.sub0
-    FLAT_STORE_DWORDX3 %vgpr0_vgpr1, killed %7, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX3 $vgpr0_vgpr1, killed %7, 0, 0, 0, implicit $exec, implicit $flat_scr
 
     %8 = IMPLICIT_DEF
     undef %9.sub0_sub1_sub2 = COPY %8
     %9.sub3 = COPY %3.sub0
-    FLAT_STORE_DWORDX4 %vgpr0_vgpr1, killed %9, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX4 $vgpr0_vgpr1, killed %9, 0, 0, 0, implicit $exec, implicit $flat_scr
 ...
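
limit-coalesce.mir also leans on subregister defs of virtual registers, which keep the '%' sigil throughout. For the notation used in the body above:

#   undef %4.sub0 = COPY $sgpr0    ; defines only lane sub0; the other lanes of %4 are undef
#   %4.sub1 = COPY %3.sub0         ; partial def; the remaining lanes keep their value
#   undef %7.sub0_sub1 = COPY %6   ; a multi-lane subregister def
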
diff --git a/llvm/test/CodeGen/AMDGPU/liveness.mir b/llvm/test/CodeGen/AMDGPU/liveness.mir
index 8bb946d..58239e8 100644
--- a/llvm/test/CodeGen/AMDGPU/liveness.mir
+++ b/llvm/test/CodeGen/AMDGPU/liveness.mir
@@ -17,7 +17,7 @@
 body: |
   bb.0:
     S_NOP 0, implicit-def undef %0.sub0
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.dbg.value.ll b/llvm/test/CodeGen/AMDGPU/llvm.dbg.value.ll
index ace859c..03b121d 100644
--- a/llvm/test/CodeGen/AMDGPU/llvm.dbg.value.ll
+++ b/llvm/test/CodeGen/AMDGPU/llvm.dbg.value.ll
@@ -5,7 +5,7 @@
 ; NOOPT: s_load_dwordx2 s[4:5]
 
 ; FIXME: Why is the SGPR4_SGPR5 reference being removed from DBG_VALUE?
-; NOOPT: ; kill: def %sgpr8_sgpr9 killed %sgpr4_sgpr5
+; NOOPT: ; kill: def $sgpr8_sgpr9 killed $sgpr4_sgpr5
 ; NOOPT-NEXT: ;DEBUG_VALUE: test_debug_value:globalptr_arg <- undef
 
 ; GCN: flat_store_dword
diff --git a/llvm/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir b/llvm/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
index 6c6b19a..5ab9ed9 100644
--- a/llvm/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
+++ b/llvm/test/CodeGen/AMDGPU/macro-fusion-cluster-vcc-uses.mir
@@ -1,9 +1,9 @@
 # RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs -run-pass machine-scheduler -o - %s | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: cluster_add_addc
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %3, implicit $exec
 name: cluster_add_addc
 registers:
   - { id: 0, class: vgpr_32 }
@@ -17,20 +17,20 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_ADDC_U32_e64 %6, %7, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2, %3 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_ADDC_U32_e64 %6, %7, %3, implicit $exec
 ...
 
 # GCN-LABEL: name: interleave_add64s
-# GCN: dead %8:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %12:vgpr_32, dead %13:sreg_64_xexec = V_ADDC_U32_e64 %4, %5, %9, implicit %exec
-# GCN-NEXT: dead %10:vgpr_32, %11:sreg_64_xexec = V_ADD_I32_e64 %2, %3, implicit %exec
-# GCN-NEXT: dead %14:vgpr_32, dead %15:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %11, implicit %exec
+# GCN: dead %8:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %12:vgpr_32, dead %13:sreg_64_xexec = V_ADDC_U32_e64 %4, %5, %9, implicit $exec
+# GCN-NEXT: dead %10:vgpr_32, %11:sreg_64_xexec = V_ADD_I32_e64 %2, %3, implicit $exec
+# GCN-NEXT: dead %14:vgpr_32, dead %15:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %11, implicit $exec
 name: interleave_add64s
 registers:
   - { id: 0, class: vgpr_32 }
@@ -52,27 +52,27 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
-    %4 = V_MOV_B32_e32 0, implicit %exec
-    %5 = V_MOV_B32_e32 0, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
+    %4 = V_MOV_B32_e32 0, implicit $exec
+    %5 = V_MOV_B32_e32 0, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
 
-    %8, %9 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %10, %11 = V_ADD_I32_e64 %2, %3, implicit %exec
+    %8, %9 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %10, %11 = V_ADD_I32_e64 %2, %3, implicit $exec
 
 
-    %12, %13 = V_ADDC_U32_e64 %4, %5, %9, implicit %exec
-    %14, %15 = V_ADDC_U32_e64 %6, %7, %11, implicit %exec
+    %12, %13 = V_ADDC_U32_e64 %4, %5, %9, implicit $exec
+    %14, %15 = V_ADDC_U32_e64 %6, %7, %11, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_mov_addc
-# GCN: S_NOP 0, implicit-def %vcc
+# GCN: S_NOP 0, implicit-def $vcc
 # GCN-NEXT: %2:sreg_64_xexec = S_MOV_B64 0
-# GCN-NEXT: dead %3:vgpr_32, dead %4:sreg_64_xexec = V_ADDC_U32_e64 %0, %1, %2, implicit %exec
+# GCN-NEXT: dead %3:vgpr_32, dead %4:sreg_64_xexec = V_ADDC_U32_e64 %0, %1, %2, implicit $exec
 name: cluster_mov_addc
 registers:
   - { id: 0, class: vgpr_32 }
@@ -85,20 +85,20 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
     %2 = S_MOV_B64 0
-    S_NOP 0, implicit def %vcc
-    %3, %4 = V_ADDC_U32_e64 %0, %1, %2, implicit %exec
+    S_NOP 0, implicit def $vcc
+    %3, %4 = V_ADDC_U32_e64 %0, %1, %2, implicit $exec
 ...
 
 # GCN-LABEL: name: no_cluster_add_addc_diff_sgpr
-# GCN: dead %2:vgpr_32, dead %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: %6:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: %7:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: S_NOP 0, implicit-def %vcc
+# GCN: dead %2:vgpr_32, dead %3:sreg_64_xexec = V_ADD_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: %6:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: %7:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: S_NOP 0, implicit-def $vcc
 # GCN-NEXT: %8:sreg_64_xexec = S_MOV_B64 0
-# GCN-NEXT: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %8, implicit %exec
+# GCN-NEXT: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_ADDC_U32_e64 %6, %7, %8, implicit $exec
 name: no_cluster_add_addc_diff_sgpr
 registers:
   - { id: 0, class: vgpr_32 }
@@ -112,19 +112,19 @@
   - { id: 8, class: sreg_64_xexec }
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
     %8 = S_MOV_B64 0
-    %2, %3 = V_ADD_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_ADDC_U32_e64 %6, %7, %8, implicit %exec
+    %2, %3 = V_ADD_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_ADDC_U32_e64 %6, %7, %8, implicit $exec
 ...
 # GCN-LABEL: name: cluster_sub_subb
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_SUB_I32_e64 %0, %1, implicit %exec
-# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_SUBB_U32_e64 %6, %7, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN: dead %2:vgpr_32, %3:sreg_64_xexec = V_SUB_I32_e64 %0, %1, implicit $exec
+# GCN: dead %4:vgpr_32, dead %5:sreg_64_xexec = V_SUBB_U32_e64 %6, %7, %3, implicit $exec
 name: cluster_sub_subb
 registers:
   - { id: 0, class: vgpr_32 }
@@ -138,19 +138,19 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2, %3 = V_SUB_I32_e64 %0, %1, implicit %exec
-    %6 = V_MOV_B32_e32 0, implicit %exec
-    %7 = V_MOV_B32_e32 0, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4, %5 = V_SUBB_U32_e64 %6, %7, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2, %3 = V_SUB_I32_e64 %0, %1, implicit $exec
+    %6 = V_MOV_B32_e32 0, implicit $exec
+    %7 = V_MOV_B32_e32 0, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4, %5 = V_SUBB_U32_e64 %6, %7, %3, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_cmp_cndmask
-# GCN: S_NOP 0, implicit-def %vcc
-# GCN-NEXT: %3:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %4:vgpr_32 = V_CNDMASK_B32_e64 %0, %1, %3, implicit %exec
+# GCN: S_NOP 0, implicit-def $vcc
+# GCN-NEXT: %3:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %4:vgpr_32 = V_CNDMASK_B32_e64 %0, %1, %3, implicit $exec
 name: cluster_cmp_cndmask
 registers:
   - { id: 0, class: vgpr_32 }
@@ -164,17 +164,17 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %4 = V_CNDMASK_B32_e64 %0, %1, %3, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %4 = V_CNDMASK_B32_e64 %0, %1, %3, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_multi_use_cmp_cndmask
-# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 name: cluster_multi_use_cmp_cndmask
 registers:
   - { id: 0, class: vgpr_32 }
@@ -188,22 +188,22 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
 
-    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    S_NOP 0, implicit def %vcc
-    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    S_NOP 0, implicit def $vcc
+    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 ...
 
 # GCN-LABEL: name: cluster_multi_use_cmp_cndmask2
-# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-# GCN-NEXT: %3:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+# GCN: %4:sreg_64_xexec = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+# GCN-NEXT: dead %5:vgpr_32 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+# GCN-NEXT: %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+# GCN-NEXT: dead %6:vgpr_32 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 name: cluster_multi_use_cmp_cndmask2
 registers:
   - { id: 0, class: vgpr_32 }
@@ -217,11 +217,11 @@
 
 body: |
   bb.0:
-    %0 = V_MOV_B32_e32 0, implicit %exec
-    %1 = V_MOV_B32_e32 0, implicit %exec
-    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit %exec
-    %2 = V_MOV_B32_e32 0, implicit %exec
-    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit %exec
-    %3 = V_MOV_B32_e32 0, implicit %exec
-    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit %exec
+    %0 = V_MOV_B32_e32 0, implicit $exec
+    %1 = V_MOV_B32_e32 0, implicit $exec
+    %4 = V_CMP_EQ_I32_e64 %0, %1, implicit $exec
+    %2 = V_MOV_B32_e32 0, implicit $exec
+    %5 = V_CNDMASK_B32_e64 %2, %1, %4, implicit $exec
+    %3 = V_MOV_B32_e32 0, implicit $exec
+    %6 = V_CNDMASK_B32_e64 %1, %3, %4, implicit $exec
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
index d4ddfbe..89a4780 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-atomic-insert-end.mir
@@ -65,8 +65,8 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
-  - { reg: '%vgpr0' }
+  - { reg: '$sgpr0_sgpr1' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -84,38 +84,38 @@
 body:             |
   bb.0 (%ir-block.0):
     successors: %bb.1.atomic(0x40000000), %bb.2.exit(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
  
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %vgpr1 = V_ASHRREV_I32_e32 31, %vgpr0, implicit %exec
-    %vgpr1_vgpr2 = V_LSHL_B64 %vgpr0_vgpr1, 3, implicit %exec
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 0
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $vgpr1 = V_ASHRREV_I32_e32 31, $vgpr0, implicit $exec
+    $vgpr1_vgpr2 = V_LSHL_B64 $vgpr0_vgpr1, 3, implicit $exec
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 0
     S_WAITCNT 127
-    %vgpr1_vgpr2 = BUFFER_LOAD_DWORDX2_ADDR64 killed %vgpr1_vgpr2, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 8 from %ir.tid.gep)
-    %vgpr0 = V_XOR_B32_e32 1, killed %vgpr0, implicit %exec
-    V_CMP_NE_U32_e32 0, killed %vgpr0, implicit-def %vcc, implicit %exec
-    %sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-    %sgpr2_sgpr3 = S_XOR_B64 %exec, killed %sgpr2_sgpr3, implicit-def dead %scc
-    SI_MASK_BRANCH %bb.2.exit, implicit %exec
+    $vgpr1_vgpr2 = BUFFER_LOAD_DWORDX2_ADDR64 killed $vgpr1_vgpr2, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 8 from %ir.tid.gep)
+    $vgpr0 = V_XOR_B32_e32 1, killed $vgpr0, implicit $exec
+    V_CMP_NE_U32_e32 0, killed $vgpr0, implicit-def $vcc, implicit $exec
+    $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+    $sgpr2_sgpr3 = S_XOR_B64 $exec, killed $sgpr2_sgpr3, implicit-def dead $scc
+    SI_MASK_BRANCH %bb.2.exit, implicit $exec
  
   bb.1.atomic:
     successors: %bb.2.exit(0x80000000)
-    liveins: %sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, %sgpr0_sgpr1, %sgpr2_sgpr3, %vgpr1_vgpr2_vgpr3_vgpr4:0x00000003
+    liveins: $sgpr4_sgpr5_sgpr6_sgpr7:0x0000000C, $sgpr0_sgpr1, $sgpr2_sgpr3, $vgpr1_vgpr2_vgpr3_vgpr4:0x00000003
  
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    dead %vgpr0 = V_MOV_B32_e32 -1, implicit %exec
-    dead %vgpr0 = V_MOV_B32_e32 61440, implicit %exec
-    %sgpr4_sgpr5 = S_MOV_B64 0
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 15, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    dead $vgpr0 = V_MOV_B32_e32 -1, implicit $exec
+    dead $vgpr0 = V_MOV_B32_e32 61440, implicit $exec
+    $sgpr4_sgpr5 = S_MOV_B64 0
     S_WAITCNT 127
-    %vgpr0 = V_MOV_B32_e32 killed %sgpr0, implicit %exec, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit $exec
     S_WAITCNT 3952
-    BUFFER_ATOMIC_SMAX_ADDR64 killed %vgpr0, killed %vgpr1_vgpr2, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 400, 0, implicit %exec :: (volatile load seq_cst 4 from %ir.gep)
+    BUFFER_ATOMIC_SMAX_ADDR64 killed $vgpr0, killed $vgpr1_vgpr2, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 400, 0, implicit $exec :: (volatile load seq_cst 4 from %ir.gep)
  
   bb.2.exit:
-    liveins: %sgpr2_sgpr3
+    liveins: $sgpr2_sgpr3
 
-    %exec = S_OR_B64 %exec, killed %sgpr2_sgpr3, implicit-def %scc
+    $exec = S_OR_B64 $exec, killed $sgpr2_sgpr3, implicit-def $scc
     S_ENDPGM
 
 ...
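
Sigils aside, the body above is the standard divergent-branch skeleton. A brief gloss of the exec manipulation, annotated from the lines above (trailing implicit operands abbreviated with "..."):

#   $sgpr2_sgpr3 = S_AND_SAVEEXEC_B64 killed $vcc, ...       ; save exec, mask execution to the taken lanes
#   $sgpr2_sgpr3 = S_XOR_B64 $exec, killed $sgpr2_sgpr3, ... ; compute the lanes to re-enable at the join
#   SI_MASK_BRANCH %bb.2.exit, implicit $exec                ; skip the guarded block when exec is zero
#   $exec = S_OR_B64 $exec, killed $sgpr2_sgpr3, ...         ; join: restore the saved lanes
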
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
index 2f3095c..e2bfae6 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-atomics.mir
@@ -79,8 +79,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -112,52 +112,52 @@
 body:             |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
+    liveins: $sgpr0_sgpr1, $sgpr3
 
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
     S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
   bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load syncscope("agent") unordered 4 from %ir.else_ptr), (load syncscope("workgroup") seq_cst 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (load syncscope("agent") unordered 4 from %ir.else_ptr), (load syncscope("workgroup") seq_cst 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir
index 263bbeb..04fb9cb 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-1.mir
@@ -66,7 +66,7 @@
 # CHECK-LABEL: name: multiple_mem_operands
 
 # CHECK-LABEL: bb.3.done:
-# CHECK: BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 1, 1, 0
+# CHECK: BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 1, 1, 0
 
 name:            multiple_mem_operands
 alignment:       0
@@ -77,8 +77,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -110,52 +110,52 @@
 body:             |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
+    liveins: $sgpr0_sgpr1, $sgpr3
 
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
     S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
   bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (non-temporal load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (non-temporal load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir
index 7e0c9e4..b13ea87 100644
--- a/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir
+++ b/llvm/test/CodeGen/AMDGPU/memory-legalizer-multiple-mem-operands-nontemporal-2.mir
@@ -66,7 +66,7 @@
 # CHECK-LABEL: name: multiple_mem_operands
 
 # CHECK-LABEL: bb.3.done:
-# CHECK: BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0
+# CHECK: BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0
 
 name:            multiple_mem_operands
 alignment:       0
@@ -77,8 +77,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '' }
-  - { reg: '%sgpr3', virtual-reg: '' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '' }
+  - { reg: '$sgpr3', virtual-reg: '' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -110,52 +110,52 @@
 body:             |
   bb.0.entry:
     successors: %bb.1.if(0x30000000), %bb.2.else(0x50000000)
-    liveins: %sgpr0_sgpr1, %sgpr3
+    liveins: $sgpr0_sgpr1, $sgpr3
 
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM %sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr10 = S_MOV_B32 4294967295, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %sgpr11 = S_MOV_B32 15204352, implicit-def %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 4, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr01)
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 44, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr8 = S_MOV_B32 &SCRATCH_RSRC_DWORD0, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr4_sgpr5 = S_LOAD_DWORDX2_IMM $sgpr0_sgpr1, 36, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr9 = S_MOV_B32 &SCRATCH_RSRC_DWORD1, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr10 = S_MOV_B32 4294967295, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $sgpr11 = S_MOV_B32 15204352, implicit-def $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 4, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr01)
     S_WAITCNT 127
-    S_CMP_LG_U32 killed %sgpr2, 0, implicit-def %scc
+    S_CMP_LG_U32 killed $sgpr2, 0, implicit-def $scc
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 2, implicit %exec
-    %vgpr1 = V_MOV_B32_e32 32772, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN killed %vgpr0, killed %vgpr1, %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.scratchptr12)
-    S_CBRANCH_SCC0 %bb.1.if, implicit killed %scc
+    $vgpr0 = V_MOV_B32_e32 2, implicit $exec
+    $vgpr1 = V_MOV_B32_e32 32772, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN killed $vgpr0, killed $vgpr1, $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.scratchptr12)
+    S_CBRANCH_SCC0 %bb.1.if, implicit killed $scc
 
   bb.2.else:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 52, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 32772, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 32772, implicit $exec
     S_BRANCH %bb.3.done
 
   bb.1.if:
     successors: %bb.3.done(0x80000000)
-    liveins: %sgpr0_sgpr1, %sgpr4_sgpr5, %sgpr3, %sgpr8_sgpr9_sgpr10_sgpr11
+    liveins: $sgpr0_sgpr1, $sgpr4_sgpr5, $sgpr3, $sgpr8_sgpr9_sgpr10_sgpr11
 
-    %sgpr0 = S_LOAD_DWORD_IMM killed %sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
+    $sgpr0 = S_LOAD_DWORD_IMM killed $sgpr0_sgpr1, 48, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
     S_WAITCNT 3855
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
 
   bb.3.done:
-    liveins: %sgpr3, %sgpr4_sgpr5, %sgpr8_sgpr9_sgpr10_sgpr11, %vgpr0, %sgpr0
+    liveins: $sgpr3, $sgpr4_sgpr5, $sgpr8_sgpr9_sgpr10_sgpr11, $vgpr0, $sgpr0
 
     S_WAITCNT 127
-    %sgpr0 = S_LSHL_B32 killed %sgpr0, 2, implicit-def dead %scc
-    %vgpr0 = V_ADD_I32_e32 killed %sgpr0, killed %vgpr0, implicit-def dead %vcc, implicit %exec
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed %vgpr0, killed %sgpr8_sgpr9_sgpr10_sgpr11, %sgpr3, 0, 0, 0, 0, implicit %exec :: (load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
-    %vgpr1 = V_MOV_B32_e32 %sgpr4, implicit %exec, implicit-def %vgpr1_vgpr2, implicit %sgpr4_sgpr5
-    %vgpr2 = V_MOV_B32_e32 killed %sgpr5, implicit %exec, implicit %sgpr4_sgpr5, implicit %exec
+    $sgpr0 = S_LSHL_B32 killed $sgpr0, 2, implicit-def dead $scc
+    $vgpr0 = V_ADD_I32_e32 killed $sgpr0, killed $vgpr0, implicit-def dead $vcc, implicit $exec
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFEN killed $vgpr0, killed $sgpr8_sgpr9_sgpr10_sgpr11, $sgpr3, 0, 0, 0, 0, implicit $exec :: (load 4 from %ir.else_ptr), (non-temporal load 4 from %ir.if_ptr)
+    $vgpr1 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit-def $vgpr1_vgpr2, implicit $sgpr4_sgpr5
+    $vgpr2 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit $sgpr4_sgpr5, implicit $exec
     S_WAITCNT 3952
-    FLAT_STORE_DWORD killed %vgpr1_vgpr2, killed %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.out)
+    FLAT_STORE_DWORD killed $vgpr1_vgpr2, killed $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-load-store-vreg.mir b/llvm/test/CodeGen/AMDGPU/merge-load-store-vreg.mir
index fbd5611..0ab2974 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-load-store-vreg.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-load-store-vreg.mir
@@ -3,7 +3,7 @@
 
 # If there's a base offset, check that SILoadStoreOptimizer creates
 # V_ADD_{I|U}32_e64 for that offset; _e64 uses a vreg for the carry (rather than
-# %vcc, which is used in _e32); this ensures that %vcc is not inadvertently
+# $vcc, which is used in _e32); this ensures that $vcc is not inadvertently
 # clobbered.
 
 # GCN-LABEL: name: kernel
@@ -46,15 +46,15 @@
     S_ENDPGM
 
   bb.2:
-    %1:sreg_64_xexec = V_CMP_NE_U32_e64 %0, 0, implicit %exec
-    %2:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %1, implicit %exec
-    V_CMP_NE_U32_e32 1, %2, implicit-def %vcc, implicit %exec
-    DS_WRITE_B32 %0, %0, 1024, 0, implicit %m0, implicit %exec :: (store 4 into %ir.tmp)
-    %3:vgpr_32 = V_MOV_B32_e32 0, implicit %exec
-    DS_WRITE_B32 %0, %3, 1056, 0, implicit %m0, implicit %exec :: (store 4 into %ir.tmp1)
-    %4:vgpr_32 = DS_READ_B32 %3, 1088, 0, implicit %m0, implicit %exec :: (load 4 from %ir.tmp2)
-    %5:vgpr_32 = DS_READ_B32 %3, 1120, 0, implicit %m0, implicit %exec :: (load 4 from %ir.tmp3)
-    %vcc = S_AND_B64 %exec, %vcc, implicit-def %scc
-    S_CBRANCH_VCCNZ %bb.1, implicit %vcc
+    %1:sreg_64_xexec = V_CMP_NE_U32_e64 %0, 0, implicit $exec
+    %2:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %1, implicit $exec
+    V_CMP_NE_U32_e32 1, %2, implicit-def $vcc, implicit $exec
+    DS_WRITE_B32 %0, %0, 1024, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp)
+    %3:vgpr_32 = V_MOV_B32_e32 0, implicit $exec
+    DS_WRITE_B32 %0, %3, 1056, 0, implicit $m0, implicit $exec :: (store 4 into %ir.tmp1)
+    %4:vgpr_32 = DS_READ_B32 %3, 1088, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp2)
+    %5:vgpr_32 = DS_READ_B32 %3, 1120, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp3)
+    $vcc = S_AND_B64 $exec, $vcc, implicit-def $scc
+    S_CBRANCH_VCCNZ %bb.1, implicit $vcc
     S_BRANCH %bb.1
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-load-store.mir b/llvm/test/CodeGen/AMDGPU/merge-load-store.mir
index d61cefd..78ed249 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-load-store.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-load-store.mir
@@ -34,7 +34,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%1' }
+  - { reg: '$vgpr0', virtual-reg: '%1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -51,20 +51,20 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %1:vgpr_32 = COPY %vgpr0
-    %m0 = S_MOV_B32 -1
-    %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit %m0, implicit %exec :: (load 4 from %ir.ptr.0)
-    DS_WRITE_B32 %1, killed %2, 64, 0, implicit %m0, implicit %exec :: (store 4 into %ir.ptr.64)
+    %1:vgpr_32 = COPY $vgpr0
+    $m0 = S_MOV_B32 -1
+    %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit $m0, implicit $exec :: (load 4 from %ir.ptr.0)
+    DS_WRITE_B32 %1, killed %2, 64, 0, implicit $m0, implicit $exec :: (store 4 into %ir.ptr.64)
 
     ; Make this load unmergeable, to tempt SILoadStoreOptimizer into merging the
     ; other two loads.
-    %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit %m0, implicit %exec :: (load 8 from %ir.ptr.64, align 4)
+    %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit $m0, implicit $exec :: (load 8 from %ir.ptr.64, align 4)
     %3:vgpr_32 = COPY %6.sub0
-    %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit %m0, implicit %exec :: (load 4 from %ir.ptr.4)
-    %5:vgpr_32 = V_ADD_I32_e32 killed %3, killed %4, implicit-def %vcc, implicit %exec
-    DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed %m0, implicit %exec :: (store 4 into %ir.ptr.0)
+    %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit $m0, implicit $exec :: (load 4 from %ir.ptr.4)
+    %5:vgpr_32 = V_ADD_I32_e32 killed %3, killed %4, implicit-def $vcc, implicit $exec
+    DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed $m0, implicit $exec :: (store 4 into %ir.ptr.0)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/merge-m0.mir b/llvm/test/CodeGen/AMDGPU/merge-m0.mir
index 720642a..73a6b13 100644
--- a/llvm/test/CodeGen/AMDGPU/merge-m0.mir
+++ b/llvm/test/CodeGen/AMDGPU/merge-m0.mir
@@ -64,68 +64,68 @@
 
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    S_CBRANCH_VCCZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:
     successors: %bb.2
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
     successors: %bb.3
-    SI_INIT_M0 65536, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 65536, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.3
 
   bb.3:
     successors: %bb.4, %bb.5
-    S_CBRANCH_VCCZ %bb.4, implicit undef %vcc
+    S_CBRANCH_VCCZ %bb.4, implicit undef $vcc
     S_BRANCH %bb.5
 
   bb.4:
     successors: %bb.6
-    SI_INIT_M0 3, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 4, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 3, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 4, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.6
 
   bb.5:
     successors: %bb.6
-    SI_INIT_M0 3, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 4, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 3, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 4, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     S_BRANCH %bb.6
 
   bb.6:
     successors: %bb.0.entry, %bb.6
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
     %2 = IMPLICIT_DEF
-    SI_INIT_M0 %2, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 %2, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    SI_INIT_M0 -1, implicit-def %m0
-    DS_WRITE_B32 %0, %1, 0, 0, implicit %m0, implicit %exec
-    S_CBRANCH_VCCZ %bb.6, implicit undef %vcc
+    SI_INIT_M0 %2, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 %2, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    SI_INIT_M0 -1, implicit-def $m0
+    DS_WRITE_B32 %0, %1, 0, 0, implicit $m0, implicit $exec
+    S_CBRANCH_VCCZ %bb.6, implicit undef $vcc
     S_BRANCH %bb.0.entry
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/misched-killflags.mir b/llvm/test/CodeGen/AMDGPU/misched-killflags.mir
index ac3a25e..811ef0d 100644
--- a/llvm/test/CodeGen/AMDGPU/misched-killflags.mir
+++ b/llvm/test/CodeGen/AMDGPU/misched-killflags.mir
@@ -5,41 +5,41 @@
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3
+    liveins: $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3
 
-    %sgpr33 = S_MOV_B32 %sgpr7
-    %sgpr32 = S_MOV_B32 %sgpr33
-    %sgpr10 = S_MOV_B32 5
-    %sgpr9 = S_MOV_B32 4
-    %sgpr8 = S_MOV_B32 3
-    BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
-      %sgpr6_sgpr7 = S_GETPC_B64
-      %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
-      %sgpr7 = S_ADDC_U32 internal %sgpr7,0, implicit-def %scc, implicit internal %scc
+    $sgpr33 = S_MOV_B32 $sgpr7
+    $sgpr32 = S_MOV_B32 $sgpr33
+    $sgpr10 = S_MOV_B32 5
+    $sgpr9 = S_MOV_B32 4
+    $sgpr8 = S_MOV_B32 3
+    BUNDLE implicit-def $sgpr6_sgpr7, implicit-def $sgpr6, implicit-def $sgpr7, implicit-def $scc {
+      $sgpr6_sgpr7 = S_GETPC_B64
+      $sgpr6 = S_ADD_U32 internal $sgpr6, 0, implicit-def $scc
+      $sgpr7 = S_ADDC_U32 internal $sgpr7, 0, implicit-def $scc, implicit internal $scc
     }
-    %sgpr4 = S_MOV_B32 %sgpr33
-    %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-    %vgpr3 = V_MOV_B32_e32 %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
-    S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+    $sgpr4 = S_MOV_B32 $sgpr33
+    $vgpr0 = V_MOV_B32_e32 $sgpr8, implicit $exec, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr1 = V_MOV_B32_e32 $sgpr9, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr2 = V_MOV_B32_e32 $sgpr10, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+    $vgpr3 = V_MOV_B32_e32 $sgpr11, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec
+    S_NOP 0, implicit killed $sgpr6_sgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit killed $vgpr0_vgpr1_vgpr2_vgpr3
     S_ENDPGM
 ...
 # CHECK-LABEL: name: func0
-# CHECK: %sgpr10 = S_MOV_B32 5
-# CHECK: %sgpr9 = S_MOV_B32 4
-# CHECK: %sgpr8 = S_MOV_B32 3
-# CHECK: %sgpr33 = S_MOV_B32 killed %sgpr7
-# CHECK: %vgpr0 = V_MOV_B32_e32 %sgpr8, implicit %exec, implicit-def %vgpr0_vgpr1_vgpr2_vgpr3, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: BUNDLE implicit-def %sgpr6_sgpr7, implicit-def %sgpr6, implicit-def %sgpr7, implicit-def %scc {
-# CHECK:   %sgpr6_sgpr7 = S_GETPC_B64
-# CHECK:   %sgpr6 = S_ADD_U32 internal %sgpr6, 0, implicit-def %scc
-# CHECK:   %sgpr7 = S_ADDC_U32 internal %sgpr7, 0, implicit-def %scc, implicit internal %scc
+# CHECK: $sgpr10 = S_MOV_B32 5
+# CHECK: $sgpr9 = S_MOV_B32 4
+# CHECK: $sgpr8 = S_MOV_B32 3
+# CHECK: $sgpr33 = S_MOV_B32 killed $sgpr7
+# CHECK: $vgpr0 = V_MOV_B32_e32 $sgpr8, implicit $exec, implicit-def $vgpr0_vgpr1_vgpr2_vgpr3, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: BUNDLE implicit-def $sgpr6_sgpr7, implicit-def $sgpr6, implicit-def $sgpr7, implicit-def $scc {
+# CHECK:   $sgpr6_sgpr7 = S_GETPC_B64
+# CHECK:   $sgpr6 = S_ADD_U32 internal $sgpr6, 0, implicit-def $scc
+# CHECK:   $sgpr7 = S_ADDC_U32 internal $sgpr7, 0, implicit-def $scc, implicit internal $scc
 # CHECK: }
-# CHECK: %sgpr4 = S_MOV_B32 %sgpr33
-# CHECK: %vgpr1 = V_MOV_B32_e32 %sgpr9, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: %vgpr2 = V_MOV_B32_e32 %sgpr10, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11
-# CHECK: %vgpr3 = V_MOV_B32_e32 killed %sgpr11, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %exec
-# CHECK: %sgpr32 = S_MOV_B32 killed %sgpr33
-# CHECK: S_NOP 0, implicit killed %sgpr6_sgpr7, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit killed %vgpr0_vgpr1_vgpr2_vgpr3
+# CHECK: $sgpr4 = S_MOV_B32 $sgpr33
+# CHECK: $vgpr1 = V_MOV_B32_e32 $sgpr9, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: $vgpr2 = V_MOV_B32_e32 $sgpr10, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11
+# CHECK: $vgpr3 = V_MOV_B32_e32 killed $sgpr11, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $exec
+# CHECK: $sgpr32 = S_MOV_B32 killed $sgpr33
+# CHECK: S_NOP 0, implicit killed $sgpr6_sgpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit killed $vgpr0_vgpr1_vgpr2_vgpr3
 # CHECK: S_ENDPGM
diff --git a/llvm/test/CodeGen/AMDGPU/movrels-bug.mir b/llvm/test/CodeGen/AMDGPU/movrels-bug.mir
index 9c330bc..84e34f3 100644
--- a/llvm/test/CodeGen/AMDGPU/movrels-bug.mir
+++ b/llvm/test/CodeGen/AMDGPU/movrels-bug.mir
@@ -20,12 +20,12 @@
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %m0 = S_MOV_B32 undef %sgpr0
-    V_MOVRELD_B32_e32 undef %vgpr2, 0, implicit %m0, implicit %exec, implicit-def %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8, implicit undef %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8(tied-def 4)
-    %m0 = S_MOV_B32 undef %sgpr0
-    %vgpr1 = V_MOVRELS_B32_e32 undef %vgpr1, implicit %m0, implicit %exec, implicit killed %vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
-    %vgpr4 = V_MAC_F32_e32 undef %vgpr0, undef %vgpr0, undef %vgpr4, implicit %exec
-    EXP_DONE 15, undef %vgpr0, killed %vgpr1, killed %vgpr4, undef %vgpr0, 0, 0, 12, implicit %exec
+    $m0 = S_MOV_B32 undef $sgpr0
+    V_MOVRELD_B32_e32 undef $vgpr2, 0, implicit $m0, implicit $exec, implicit-def $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8, implicit undef $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8(tied-def 4)
+    $m0 = S_MOV_B32 undef $sgpr0
+    $vgpr1 = V_MOVRELS_B32_e32 undef $vgpr1, implicit $m0, implicit $exec, implicit killed $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
+    $vgpr4 = V_MAC_F32_e32 undef $vgpr0, undef $vgpr0, undef $vgpr4, implicit $exec
+    EXP_DONE 15, undef $vgpr0, killed $vgpr1, killed $vgpr4, undef $vgpr0, 0, 0, 12, implicit $exec
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir b/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
index 9702d18..4511fdd 100644
--- a/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/opt-sgpr-to-vgpr-copy.mir
@@ -6,19 +6,19 @@
 # GCN:        %[[HI:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
 # GCN-NEXT:   %[[LO:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
 # GCN-NEXT:   %[[SGPR_PAIR:[0-9]+]]:sreg_64 = REG_SEQUENCE killed %[[LO]], %subreg.sub0, killed %[[HI]], %subreg.sub1
-# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
+# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
 
 
 # GCN-LABEL: {{^}}name: const_to_sgpr_multiple_use{{$}}
 # GCN:        %[[HI:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 0
 # GCN-NEXT:   %[[LO:[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1048576
 # GCN-NEXT:   %[[SGPR_PAIR:[0-9]+]]:sreg_64 = REG_SEQUENCE killed %[[LO]], %subreg.sub0, killed %[[HI]], %subreg.sub1
-# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
-# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit %exec
+# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
+# GCN-NEXT:   V_CMP_LT_U64_e64 killed %{{[0-9]+}}, %[[SGPR_PAIR]], implicit $exec
 
 # GCN-LABEL: {{^}}name: const_to_sgpr_subreg{{$}}
 # GCN:       %[[OP0:[0-9]+]]:vreg_64 = REG_SEQUENCE killed %{{[0-9]+}}, %subreg.sub0, killed %{{[0-9]+}}, %subreg.sub1
-# GCN-NEXT:  V_CMP_LT_U32_e64 killed %[[OP0]].sub0, 12, implicit %exec
+# GCN-NEXT:  V_CMP_LT_U32_e64 killed %[[OP0]].sub0, 12, implicit $exec
 
 --- |
   define amdgpu_kernel void @const_to_sgpr(i32 addrspace(1)* nocapture %arg, i64 %id) {
@@ -96,15 +96,15 @@
   - { id: 29, class: vgpr_32 }
   - { id: 30, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body:             |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %6 = COPY %7
@@ -115,32 +115,32 @@
     %12 = COPY %10.sub1
     %13 = COPY %8.sub0
     %14 = COPY %8.sub1
-    %15 = S_ADD_U32 killed %11, killed %13, implicit-def %scc
-    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead %scc, implicit %scc
+    %15 = S_ADD_U32 killed %11, killed %13, implicit-def $scc
+    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead $scc, implicit $scc
     %17 = REG_SEQUENCE killed %15, %subreg.sub0, killed %16, %subreg.sub1
     %18 = S_MOV_B32 0
     %19 = S_MOV_B32 1048576
     %20 = REG_SEQUENCE killed %19, %subreg.sub0, killed %18, %subreg.sub1
     %22 = COPY killed %20
-    %21 = V_CMP_LT_U64_e64 killed %17, %22, implicit %exec
-    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %21 = V_CMP_LT_U64_e64 killed %17, %22, implicit $exec
+    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
   bb.1.bb1:
     successors: %bb.2.bb2(0x80000000)
 
     %23 = S_MOV_B32 2
-    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead %scc
+    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead $scc
     %25 = S_MOV_B32 61440
     %26 = S_MOV_B32 0
     %27 = REG_SEQUENCE killed %26, %subreg.sub0, killed %25, %subreg.sub1
     %28 = REG_SEQUENCE %6, 17, killed %27, 18
-    %29 = V_MOV_B32_e32 0, implicit %exec
+    %29 = V_MOV_B32_e32 0, implicit $exec
     %30 = COPY %24
-    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...
@@ -194,15 +194,15 @@
   - { id: 38, class: vgpr_32 }
   - { id: 39, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body:             |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %9 = S_LOAD_DWORDX2_IMM %3, 13, 0
@@ -214,39 +214,39 @@
     %13 = COPY %11.sub1
     %14 = COPY %8.sub0
     %15 = COPY %8.sub1
-    %16 = S_ADD_U32 %12, killed %14, implicit-def %scc
-    %17 = S_ADDC_U32 %13, killed %15, implicit-def dead %scc, implicit %scc
+    %16 = S_ADD_U32 %12, killed %14, implicit-def $scc
+    %17 = S_ADDC_U32 %13, killed %15, implicit-def dead $scc, implicit $scc
     %18 = REG_SEQUENCE killed %16, %subreg.sub0, killed %17, %subreg.sub1
     %19 = COPY %9.sub0
     %20 = COPY %9.sub1
-    %21 = S_ADD_U32 %12, killed %19, implicit-def %scc
-    %22 = S_ADDC_U32 %13, killed %20, implicit-def dead %scc, implicit %scc
+    %21 = S_ADD_U32 %12, killed %19, implicit-def $scc
+    %22 = S_ADDC_U32 %13, killed %20, implicit-def dead $scc, implicit $scc
     %23 = REG_SEQUENCE killed %21, %subreg.sub0, killed %22, %subreg.sub1
     %24 = S_MOV_B32 0
     %25 = S_MOV_B32 1048576
     %26 = REG_SEQUENCE killed %25, %subreg.sub0, killed %24, %subreg.sub1
     %28 = COPY %26
-    %27 = V_CMP_LT_U64_e64 killed %18, %28, implicit %exec
-    %29 = V_CMP_LT_U64_e64 killed %23, %28, implicit %exec
-    %31 = S_AND_B64 killed %27, killed %29, implicit-def dead %scc
-    %1 = SI_IF killed %31, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %27 = V_CMP_LT_U64_e64 killed %18, %28, implicit $exec
+    %29 = V_CMP_LT_U64_e64 killed %23, %28, implicit $exec
+    %31 = S_AND_B64 killed %27, killed %29, implicit-def dead $scc
+    %1 = SI_IF killed %31, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
   bb.1.bb1:
     successors: %bb.2.bb2(0x80000000)
 
     %32 = S_MOV_B32 2
-    %33 = S_LSHL_B64 %0, killed %32, implicit-def dead %scc
+    %33 = S_LSHL_B64 %0, killed %32, implicit-def dead $scc
     %34 = S_MOV_B32 61440
     %35 = S_MOV_B32 0
     %36 = REG_SEQUENCE killed %35, %subreg.sub0, killed %34, %subreg.sub1
     %37 = REG_SEQUENCE %6, 17, killed %36, 18
-    %38 = V_MOV_B32_e32 0, implicit %exec
+    %38 = V_MOV_B32_e32 0, implicit $exec
     %39 = COPY %33
-    BUFFER_STORE_DWORD_ADDR64 killed %38, killed %39, killed %37, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %38, killed %39, killed %37, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...
@@ -291,15 +291,15 @@
   - { id: 29, class: vgpr_32 }
   - { id: 30, class: vreg_64 }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%2' }
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%3' }
+  - { reg: '$vgpr0', virtual-reg: '%2' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%3' }
 body:             |
   bb.0.bb:
     successors: %bb.1.bb1(0x40000000), %bb.2.bb2(0x40000000)
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %3 = COPY %sgpr0_sgpr1
-    %2 = COPY %vgpr0
+    %3 = COPY $sgpr0_sgpr1
+    %2 = COPY $vgpr0
     %7 = S_LOAD_DWORDX2_IMM %3, 9, 0
     %8 = S_LOAD_DWORDX2_IMM %3, 11, 0
     %6 = COPY %7
@@ -310,32 +310,32 @@
     %12 = COPY %10.sub1
     %13 = COPY %8.sub0
     %14 = COPY %8.sub1
-    %15 = S_ADD_U32 killed %11, killed %13, implicit-def %scc
-    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead %scc, implicit %scc
+    %15 = S_ADD_U32 killed %11, killed %13, implicit-def $scc
+    %16 = S_ADDC_U32 killed %12, killed %14, implicit-def dead $scc, implicit $scc
     %17 = REG_SEQUENCE killed %15, %subreg.sub0, killed %16, %subreg.sub1
     %18 = S_MOV_B32 12
     %19 = S_MOV_B32 1048576
     %20 = REG_SEQUENCE killed %19, %subreg.sub0, killed %18, %subreg.sub1
     %22 = COPY killed %20.sub1
-    %21 = V_CMP_LT_U32_e64 killed %17.sub0, %22, implicit %exec
-    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    %21 = V_CMP_LT_U32_e64 killed %17.sub0, %22, implicit $exec
+    %1 = SI_IF killed %21, %bb.2.bb2, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_BRANCH %bb.1.bb1
 
   bb.1.bb1:
     successors: %bb.2.bb2(0x80000000)
 
     %23 = S_MOV_B32 2
-    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead %scc
+    %24 = S_LSHL_B64 %0, killed %23, implicit-def dead $scc
     %25 = S_MOV_B32 61440
     %26 = S_MOV_B32 0
     %27 = REG_SEQUENCE killed %26, %subreg.sub0, killed %25, %subreg.sub1
     %28 = REG_SEQUENCE %6, 17, killed %27, 18
-    %29 = V_MOV_B32_e32 0, implicit %exec
+    %29 = V_MOV_B32_e32 0, implicit $exec
     %30 = COPY %24
-    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_ADDR64 killed %29, killed %30, killed %28, 0, 0, 0, 0, 0, implicit $exec
 
   bb.2.bb2:
-    SI_END_CF %1, implicit-def dead %exec, implicit-def dead %scc, implicit %exec
+    SI_END_CF %1, implicit-def dead $exec, implicit-def dead $scc, implicit $exec
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index 4ed8360..2c42f00 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -147,8 +147,8 @@
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
+# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name:            optimize_if_and_saveexec_xor
@@ -159,7 +159,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -176,37 +176,37 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name:            optimize_if_and_saveexec
@@ -217,7 +217,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -234,36 +234,36 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_or_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name:            optimize_if_or_saveexec
@@ -274,7 +274,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -291,39 +291,39 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_OR_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_valu_middle
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
 # CHECK-NEXT: SI_MASK_BRANCH
 name:            optimize_if_and_saveexec_xor_valu_middle
 alignment:       0
@@ -333,7 +333,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -350,41 +350,41 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_wrong_reg{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY $sgpr0_sgpr1
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
 name:            optimize_if_and_saveexec_xor_wrong_reg
 alignment:       0
 exposesReturnsTwice: false
@@ -393,7 +393,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -410,40 +410,40 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr6 = S_MOV_B32 -1
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term %sgpr0_sgpr1
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr6 = S_MOV_B32 -1
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term $sgpr0_sgpr1
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    liveins: $sgpr0_sgpr1 , $sgpr4_sgpr5_sgpr6_sgpr7
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1, %sgpr4_sgpr5_sgpr6_sgpr7
+    liveins: $vgpr0, $sgpr0_sgpr1, $sgpr4_sgpr5_sgpr6_sgpr7
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_modify_copy_to_exec{{$}}
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
 
 name:            optimize_if_and_saveexec_xor_modify_copy_to_exec
 alignment:       0
@@ -453,7 +453,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -470,42 +470,42 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr0 = S_MOV_B32 0
-    %sgpr1 = S_MOV_B32 1
-    %sgpr2 = S_MOV_B32 -1
-    %sgpr3 = S_MOV_B32 61440
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr0 = S_MOV_B32 0
+    $sgpr1 = S_MOV_B32 1
+    $sgpr2 = S_MOV_B32 -1
+    $sgpr3 = S_MOV_B32 61440
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_and_saveexec_xor_live_out_setexec{{$}}
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY %sgpr2_sgpr3
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY $sgpr2_sgpr3
 # CHECK-NEXT: SI_MASK_BRANCH
 name:            optimize_if_and_saveexec_xor_live_out_setexec
 alignment:       0
@@ -515,7 +515,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -532,40 +532,40 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1, %sgpr2_sgpr3
-    S_SLEEP 0, implicit %sgpr2_sgpr3
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+    S_SLEEP 0, implicit $sgpr2_sgpr3
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 
 # CHECK-LABEL: name: optimize_if_unknown_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = COPY %exec
-# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr0_sgpr1 = COPY $exec
+# CHECK: $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
 
 name:            optimize_if_unknown_saveexec
 alignment:       0
@@ -575,7 +575,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -592,36 +592,36 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_andn2_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
 # CHECK-NEXT: SI_MASK_BRANCH
 
 name:            optimize_if_andn2_saveexec
@@ -632,7 +632,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -649,38 +649,38 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_ANDN2_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}
-# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
 name:            optimize_if_andn2_saveexec_no_commute
 alignment:       0
 exposesReturnsTwice: false
@@ -689,7 +689,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0' }
+  - { reg: '$vgpr0' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -706,30 +706,30 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.main_body:
-    liveins: %vgpr0
+    liveins: $vgpr0
 
-    %sgpr0_sgpr1 = COPY %exec
-    %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
-    %vgpr0 = V_MOV_B32_e32 4, implicit %exec
-    %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $sgpr0_sgpr1 = COPY $exec
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1.if:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
 
   bb.2.end:
-    liveins: %vgpr0, %sgpr0_sgpr1
+    liveins: $vgpr0, $sgpr0_sgpr1
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
     S_ENDPGM
 
 ...
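
A quick orientation before the remaining hunks: only the physical register namespace moves to '$'. Virtual registers (%0, %1, ...), basic-block references (%bb.2), stack objects (%stack.0.tmp5), and IR references (%ir.out) keep '%', while special registers such as $exec, $scc, $vcc, $m0, and $noreg live in the physical namespace and switch along with $vgprN/$sgprN. A minimal post-patch MIR sketch of the convention (hypothetical function, not taken from any test in this patch):

---
name: sigil_example
tracksRegLiveness: true
body: |
  bb.0:
    liveins: $vgpr0
    %0:vgpr_32 = COPY $vgpr0
    %1:vgpr_32 = V_ADD_F32_e32 %0, %0, implicit $exec
    $vgpr0 = COPY %1
    S_ENDPGM
...
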
diff --git a/llvm/test/CodeGen/AMDGPU/readlane_exec0.mir b/llvm/test/CodeGen/AMDGPU/readlane_exec0.mir
index b6d58d7..3e2d1cc 100644
--- a/llvm/test/CodeGen/AMDGPU/readlane_exec0.mir
+++ b/llvm/test/CodeGen/AMDGPU/readlane_exec0.mir
@@ -10,23 +10,23 @@
 body:       |
   bb.0:
     successors: %bb.1, %bb.2
-    liveins: %vgpr1_vgpr2:0x00000001, %vgpr2_vgpr3:0x00000003
+    liveins: $vgpr1_vgpr2:0x00000001, $vgpr2_vgpr3:0x00000003
 
-    %vgpr4 = V_AND_B32_e32 1, %vgpr1, implicit %exec
-    V_CMP_EQ_U32_e32 1, killed %vgpr4, implicit-def %vcc, implicit %exec
-    %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 killed %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $vgpr4 = V_AND_B32_e32 1, $vgpr1, implicit $exec
+    V_CMP_EQ_U32_e32 1, killed $vgpr4, implicit-def $vcc, implicit $exec
+    $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 killed $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
 
-   %sgpr10 = V_READFIRSTLANE_B32 %vgpr2, implicit %exec
-   %sgpr11 = V_READFIRSTLANE_B32 %vgpr3, implicit %exec
-   %sgpr10 = S_LOAD_DWORD_IMM killed %sgpr10_sgpr11, 0, 0
+   $sgpr10 = V_READFIRSTLANE_B32 $vgpr2, implicit $exec
+   $sgpr11 = V_READFIRSTLANE_B32 $vgpr3, implicit $exec
+   $sgpr10 = S_LOAD_DWORD_IMM killed $sgpr10_sgpr11, 0, 0
    S_WAITCNT 127
-   %vgpr0 = V_XOR_B32_e32 killed %sgpr10, killed %vgpr0, implicit %exec
+   $vgpr0 = V_XOR_B32_e32 killed $sgpr10, killed $vgpr0, implicit $exec
 
   bb.2:
 
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/reduce-saveexec.mir b/llvm/test/CodeGen/AMDGPU/reduce-saveexec.mir
index 6f6b0de..fb22963 100644
--- a/llvm/test/CodeGen/AMDGPU/reduce-saveexec.mir
+++ b/llvm/test/CodeGen/AMDGPU/reduce-saveexec.mir
@@ -2,146 +2,146 @@
 
 ---
 # GCN-LABEL: name: reduce_and_saveexec
-# GCN:      %exec = S_AND_B64 %exec, killed %vcc
+# GCN:      $exec = S_AND_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_and_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_and_saveexec_commuted
-# GCN:      %exec = S_AND_B64 killed %vcc, %exec
+# GCN:      $exec = S_AND_B64 killed $vcc, $exec
 # GCN-NEXT: S_ENDPGM
 name: reduce_and_saveexec_commuted
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 killed %vcc, %exec, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 killed $vcc, $exec, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_and_saveexec_liveout
-# GCN:      %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc
-# GCN-NEXT: %exec = COPY
+# GCN:      $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc
+# GCN-NEXT: $exec = COPY
 name: reduce_and_saveexec_liveout
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_AND_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: and_saveexec
-# GCN:      %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc
+# GCN:      $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc
 # GCN-NEXT: S_ENDPGM
 name: and_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = COPY %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-    %exec = S_MOV_B64_term %sgpr2_sgpr3
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = COPY $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $exec = S_MOV_B64_term $sgpr2_sgpr3
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_or_saveexec
-# GCN:      %exec = S_OR_B64 %exec, killed %vcc
+# GCN:      $exec = S_OR_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_or_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_OR_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_OR_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_xor_saveexec
-# GCN:      %exec = S_XOR_B64 %exec, killed %vcc
+# GCN:      $exec = S_XOR_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_xor_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_andn2_saveexec
-# GCN:      %exec = S_ANDN2_B64 %exec, killed %vcc
+# GCN:      $exec = S_ANDN2_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_andn2_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_ANDN2_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_ANDN2_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_orn2_saveexec
-# GCN:      %exec = S_ORN2_B64 %exec, killed %vcc
+# GCN:      $exec = S_ORN2_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_orn2_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_ORN2_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_ORN2_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_nand_saveexec
-# GCN:      %exec = S_NAND_B64 %exec, killed %vcc
+# GCN:      $exec = S_NAND_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_nand_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_NAND_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_NAND_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_nor_saveexec
-# GCN:      %exec = S_NOR_B64 %exec, killed %vcc
+# GCN:      $exec = S_NOR_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_nor_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_NOR_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_NOR_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
 # GCN-LABEL: name: reduce_xnor_saveexec
-# GCN:      %exec = S_XNOR_B64 %exec, killed %vcc
+# GCN:      $exec = S_XNOR_B64 $exec, killed $vcc
 # GCN-NEXT: S_ENDPGM
 name: reduce_xnor_saveexec
 tracksRegLiveness: true
 body:             |
   bb.0:
-    %vcc = IMPLICIT_DEF
-    %sgpr0_sgpr1 = S_XNOR_B64 %exec, killed %vcc, implicit-def %scc
-    %exec = COPY killed %sgpr0_sgpr1
+    $vcc = IMPLICIT_DEF
+    $sgpr0_sgpr1 = S_XNOR_B64 $exec, killed $vcc, implicit-def $scc
+    $exec = COPY killed $sgpr0_sgpr1
     S_ENDPGM
 ...
 ---
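
The reduce-saveexec.mir cases above all exercise one fold in the exec-masking optimization (presumably AMDGPU's si-optimize-exec-masking pass, which these tests are written against): when a scalar logic op already reads $exec and its result's only use is a COPY into $exec that kills the source, the op can define $exec directly and the COPY disappears. A before/after sketch of the 'and' case, matching the checks above:

  Before:
    $sgpr0_sgpr1 = S_AND_B64 $exec, killed $vcc, implicit-def $scc
    $exec = COPY killed $sgpr0_sgpr1
  After:
    $exec = S_AND_B64 $exec, killed $vcc, implicit-def $scc

The reduce_and_saveexec_liveout case shows the guard: without 'killed' on the COPY source, $sgpr0_sgpr1 is live past the copy and the fold is skipped.
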
diff --git a/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir b/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
index a031353..ed52867 100644
--- a/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
+++ b/llvm/test/CodeGen/AMDGPU/regcoal-subrange-join.mir
@@ -4,8 +4,8 @@
 # This test will provoke a subrange join (see annotations below) during simple register coalescing
 # Without a fix for PR33524 this causes an unreachable in SubRange Join
 #
-# GCN-DAG: undef %[[REG0:[0-9]+]].sub0:sgpr_64 = COPY %sgpr5
-# GCN-DAG: undef %[[REG1:[0-9]+]].sub0:sgpr_64 = COPY %sgpr2
+# GCN-DAG: undef %[[REG0:[0-9]+]].sub0:sgpr_64 = COPY $sgpr5
+# GCN-DAG: undef %[[REG1:[0-9]+]].sub0:sgpr_64 = COPY $sgpr2
 # GCN-DAG: %[[REG0]].sub1:sgpr_64 = S_MOV_B32 1
 # GCN-DAG: %[[REG1]].sub1:sgpr_64 = S_MOV_B32 1
 
@@ -82,14 +82,14 @@
   - { id: 60, class: sreg_32_xm0 }
   - { id: 61, class: vreg_128 }
 liveins:
-  - { reg: '%sgpr2', virtual-reg: '%12' }
-  - { reg: '%sgpr5', virtual-reg: '%15' }
+  - { reg: '$sgpr2', virtual-reg: '%12' }
+  - { reg: '$sgpr5', virtual-reg: '%15' }
 body:             |
   bb.0:
-    liveins: %sgpr2, %sgpr5
+    liveins: $sgpr2, $sgpr5
 
-    %15 = COPY killed %sgpr5
-    %12 = COPY killed %sgpr2
+    %15 = COPY killed $sgpr5
+    %12 = COPY killed $sgpr2
     %17 = S_MOV_B32 1
     undef %18.sub1 = COPY %17
     %0 = COPY %18
@@ -104,7 +104,7 @@
     %1 = COPY killed %25
     %26 = S_LOAD_DWORDX2_IMM %0, 2, 0
     dead %27 = S_LOAD_DWORD_IMM killed %26, 0, 0
-    S_CBRANCH_SCC0 %bb.1, implicit undef %scc
+    S_CBRANCH_SCC0 %bb.1, implicit undef $scc
 
   bb.5:
     %58 = COPY killed %1
@@ -112,11 +112,11 @@
     S_BRANCH %bb.2
 
   bb.1:
-    %30 = V_MOV_B32_e32 1036831949, implicit %exec
-    %31 = V_ADD_F32_e32 %30, %1.sub3, implicit %exec
-    %33 = V_ADD_F32_e32 %30, %1.sub2, implicit %exec
-    %35 = V_ADD_F32_e32 %30, %1.sub1, implicit %exec
-    %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit %exec
+    %30 = V_MOV_B32_e32 1036831949, implicit $exec
+    %31 = V_ADD_F32_e32 %30, %1.sub3, implicit $exec
+    %33 = V_ADD_F32_e32 %30, %1.sub2, implicit $exec
+    %35 = V_ADD_F32_e32 %30, %1.sub1, implicit $exec
+    %37 = V_ADD_F32_e32 killed %30, killed %1.sub0, implicit $exec
     undef %56.sub0 = COPY killed %37
     %56.sub1 = COPY killed %35
     %56.sub2 = COPY killed %33
@@ -131,7 +131,7 @@
     %3 = COPY killed %58
     %39 = S_LOAD_DWORDX2_IMM killed %0, 6, 0
     %40 = S_LOAD_DWORD_IMM killed %39, 0, 0
-    %43 = V_MOV_B32_e32 -1102263091, implicit %exec
+    %43 = V_MOV_B32_e32 -1102263091, implicit $exec
     %60 = COPY killed %4
     %61 = COPY killed %3
 
@@ -140,23 +140,23 @@
 
     %7 = COPY killed %61
     %6 = COPY killed %60
-    %8 = S_ADD_I32 killed %6, 1, implicit-def dead %scc
-    %44 = V_ADD_F32_e32 %43, %7.sub3, implicit %exec
-    %46 = V_ADD_F32_e32 %43, %7.sub2, implicit %exec
-    %48 = V_ADD_F32_e32 %43, %7.sub1, implicit %exec
-    %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit %exec
+    %8 = S_ADD_I32 killed %6, 1, implicit-def dead $scc
+    %44 = V_ADD_F32_e32 %43, %7.sub3, implicit $exec
+    %46 = V_ADD_F32_e32 %43, %7.sub2, implicit $exec
+    %48 = V_ADD_F32_e32 %43, %7.sub1, implicit $exec
+    %50 = V_ADD_F32_e32 %43, killed %7.sub0, implicit $exec
     undef %57.sub0 = COPY killed %50
     %57.sub1 = COPY killed %48
     %57.sub2 = COPY %46
     %57.sub3 = COPY killed %44
-    S_CMP_LT_I32 %8, %40, implicit-def %scc
+    S_CMP_LT_I32 %8, %40, implicit-def $scc
     %60 = COPY killed %8
     %61 = COPY killed %57
-    S_CBRANCH_SCC1 %bb.3, implicit killed %scc
+    S_CBRANCH_SCC1 %bb.3, implicit killed $scc
     S_BRANCH %bb.4
 
   bb.4:
-    EXP 32, undef %53, undef %54, killed %46, undef %55, 0, 0, 15, implicit %exec
+    EXP 32, undef %53, undef %54, killed %46, undef %55, 0, 0, 15, implicit $exec
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir b/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
index 69538d8..49fa3b9 100644
--- a/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
+++ b/llvm/test/CodeGen/AMDGPU/regcoalesce-dbg.mir
@@ -48,29 +48,29 @@
   - { id: 19, class: vreg_64 }
   - { id: 20, class: vreg_64 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY killed %vgpr0
-    %0 = COPY killed %sgpr0_sgpr1
+    %3 = COPY killed $vgpr0
+    %0 = COPY killed $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %5 = S_LOAD_DWORD_IMM killed %0, 13, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %18 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %18 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     undef %19.sub0 = COPY killed %3
     %19.sub1 = COPY killed %18
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
-    DBG_VALUE debug-use %11, debug-use %noreg, !1, !8, debug-location !9
+    DBG_VALUE debug-use %11, debug-use $noreg, !1, !8, debug-location !9
     undef %12.sub0 = COPY killed %11
     %12.sub1 = COPY killed %10
     undef %13.sub0_sub1 = COPY killed %4
     %13.sub2_sub3 = COPY killed %12
-    %20 = V_LSHL_B64 killed %19, 2, implicit %exec
+    %20 = V_LSHL_B64 killed %19, 2, implicit $exec
     %16 = COPY killed %5
-    BUFFER_STORE_DWORD_ADDR64 killed %16, killed %20, killed %13, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out)
+    BUFFER_STORE_DWORD_ADDR64 killed %16, killed %20, killed %13, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir b/llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir
index 7ad474b..675d399 100644
--- a/llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir
+++ b/llvm/test/CodeGen/AMDGPU/regcoalesce-prune.mir
@@ -10,9 +10,9 @@
 tracksRegLiveness: true
 body: |
   bb.0:
-    undef %5.sub1 = V_MOV_B32_e32 0, implicit %exec
+    undef %5.sub1 = V_MOV_B32_e32 0, implicit $exec
     %6 = COPY %5
-    S_CBRANCH_VCCZ %bb.2, implicit undef %vcc
+    S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
 
   bb.1:
     %1 : sreg_32_xm0 = S_MOV_B32 0
@@ -23,9 +23,9 @@
     %6 : vreg_64 = COPY killed %4
 
   bb.2:
-    %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit %exec
+    %2 : vgpr_32 = V_CVT_F32_I32_e32 killed %5.sub1, implicit $exec
 
   bb.3:
-    %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit %exec
+    %3 : vgpr_32 = V_CVT_F32_I32_e32 killed %6.sub1, implicit $exec
     S_ENDPGM
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir b/llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
index 08b3ecf..976e45d 100644
--- a/llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
+++ b/llvm/test/CodeGen/AMDGPU/rename-independent-subregs-mac-operands.mir
@@ -2,7 +2,7 @@
 ---
 
 # GCN-LABEL: name: mac_invalid_operands
-# GCN: undef %18.sub0:vreg_128 = V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, implicit %exec
+# GCN: undef %18.sub0:vreg_128 = V_MAC_F32_e32 undef %3:vgpr_32, undef %9:vgpr_32, undef %18.sub0, implicit $exec
 
 name:            mac_invalid_operands
 alignment:       0
@@ -34,14 +34,14 @@
   bb.0:
     successors: %bb.2, %bb.1
 
-    %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit %exec
-    %vcc = COPY killed %7
-    S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
+    %7 = V_CMP_NEQ_F32_e64 0, 0, 0, undef %3, 0, implicit $exec
+    $vcc = COPY killed %7
+    S_CBRANCH_VCCZ %bb.2, implicit killed $vcc
 
   bb.1:
     successors: %bb.3
 
-    %4 = V_ADD_F32_e32 undef %6, undef %5, implicit %exec
+    %4 = V_ADD_F32_e32 undef %6, undef %5, implicit $exec
     undef %12.sub0 = COPY killed %4
     %17 = COPY killed %12
     S_BRANCH %bb.3
@@ -49,7 +49,7 @@
   bb.2:
     successors: %bb.3
 
-    %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit %exec
+    %8 = V_MAC_F32_e32 undef %3, undef %9, undef %8, implicit $exec
     undef %13.sub0 = COPY %8
     %13.sub1 = COPY %8
     %13.sub2 = COPY killed %8
@@ -58,12 +58,12 @@
 
   bb.3:
     %1 = COPY killed %17
-    FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD undef %10, %1.sub2, 0, 0, 0, implicit $exec, implicit $flat_scr
     %14 = COPY %1.sub1
     %16 = COPY killed %1.sub0
     undef %15.sub0 = COPY killed %16
     %15.sub1 = COPY killed %14
-    FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORDX2 undef %11, killed %15, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 
 ...
@@ -73,13 +73,13 @@
 
 # GCN-LABEL: name: vreg_does_not_dominate
 
-# GCN: undef %8.sub1:vreg_128 = V_MAC_F32_e32 undef %2:vgpr_32, undef %1:vgpr_32, undef %8.sub1, implicit %exec
-# GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit %exec
+# GCN: undef %8.sub1:vreg_128 = V_MAC_F32_e32 undef %2:vgpr_32, undef %1:vgpr_32, undef %8.sub1, implicit $exec
+# GCN: undef %7.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec
 # GCN: undef %9.sub2:vreg_128 = COPY %7.sub0
 
-# GCN: undef %6.sub3:vreg_128 = V_ADD_F32_e32 undef %3:vgpr_32, undef %3:vgpr_32, implicit %exec
-# GCN: undef %7.sub0:vreg_128 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec
-# GCN: %8.sub1:vreg_128 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit %exec
+# GCN: undef %6.sub3:vreg_128 = V_ADD_F32_e32 undef %3:vgpr_32, undef %3:vgpr_32, implicit $exec
+# GCN: undef %7.sub0:vreg_128 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec
+# GCN: %8.sub1:vreg_128 = V_ADD_F32_e32 %8.sub1, %8.sub1, implicit $exec
 
 # GCN: BUFFER_STORE_DWORD_OFFEN %6.sub3, %0,
 # GCN: BUFFER_STORE_DWORD_OFFEN %9.sub2, %0,
@@ -101,43 +101,43 @@
   - { id: 5, class: sreg_64, preferred-register: '' }
   - { id: 6, class: vreg_128, preferred-register: '' }
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%0' }
-  - { reg: '%sgpr30_sgpr31', virtual-reg: '%5' }
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$sgpr30_sgpr31', virtual-reg: '%5' }
 body:             |
   bb.0:
     successors: %bb.2, %bb.1
-    liveins: %vgpr0, %sgpr30_sgpr31, %sgpr5
+    liveins: $vgpr0, $sgpr30_sgpr31, $sgpr5
 
-    %5 = COPY %sgpr30_sgpr31
-    %0 = COPY %vgpr0
-    undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit %exec
-    %6.sub0 = V_MOV_B32_e32 0, implicit %exec
+    %5 = COPY $sgpr30_sgpr31
+    %0 = COPY $vgpr0
+    undef %6.sub1 = V_MAC_F32_e32 undef %2, undef %1, undef %6.sub1, implicit $exec
+    %6.sub0 = V_MOV_B32_e32 0, implicit $exec
     %6.sub2 = COPY %6.sub0
-    S_CBRANCH_VCCNZ %bb.2, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.2, implicit undef $vcc
     S_BRANCH %bb.1
 
   bb.1:
     successors: %bb.2
 
-    %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit %exec
-    %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit %exec
-    %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit %exec
+    %6.sub3 = V_ADD_F32_e32 undef %3, undef %3, implicit $exec
+    %6.sub0 = V_ADD_F32_e64 0, 0, 0, 0, 0, 0, implicit $exec
+    %6.sub1 = V_ADD_F32_e32 %6.sub1, %6.sub1, implicit $exec
     %6.sub2 = COPY %6.sub0
 
   bb.2:
-    BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 12, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %6.sub2, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 8, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %6.sub1, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 4, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %6.sub0, %0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4, 0, 0, 0, 0, implicit %exec
-    %sgpr30_sgpr31 = COPY %5
-    %sgpr5 = COPY %sgpr5
-    S_SETPC_B64_return %sgpr30_sgpr31, implicit %sgpr5
+    BUFFER_STORE_DWORD_OFFEN %6.sub3, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 12, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %6.sub2, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 8, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %6.sub1, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 4, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %6.sub0, %0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4, 0, 0, 0, 0, implicit $exec
+    $sgpr30_sgpr31 = COPY %5
+    $sgpr5 = COPY $sgpr5
+    S_SETPC_B64_return $sgpr30_sgpr31, implicit $sgpr5
 
 ...
 
 # GCN-LABEL: name: inf_loop_tied_operand
 # GCN: bb.0:
-# GCN-NEXT: undef %2.sub0:vreg_128 = V_MAC_F32_e32 1073741824, undef %0:vgpr_32, undef %2.sub0, implicit %exec
+# GCN-NEXT: undef %2.sub0:vreg_128 = V_MAC_F32_e32 1073741824, undef %0:vgpr_32, undef %2.sub0, implicit $exec
 # GCN-NEXT: dead undef %3.sub1:vreg_128 = COPY %2.sub0
 
 name:            inf_loop_tied_operand
@@ -148,7 +148,7 @@
   - { id: 2, class: vreg_128, preferred-register: '' }
 body:             |
   bb.0:
-    %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit %exec
+    %1 = V_MAC_F32_e32 1073741824, undef %0, undef %1, implicit $exec
     undef %2.sub0 = COPY %1
     %2.sub1 = COPY %1
 
diff --git a/llvm/test/CodeGen/AMDGPU/rename-independent-subregs.mir b/llvm/test/CodeGen/AMDGPU/rename-independent-subregs.mir
index 31ad26e..7eea5d9 100644
--- a/llvm/test/CodeGen/AMDGPU/rename-independent-subregs.mir
+++ b/llvm/test/CodeGen/AMDGPU/rename-independent-subregs.mir
@@ -50,7 +50,7 @@
 body: |
   bb.0:
     S_NOP 0, implicit-def undef %0.sub2
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:
diff --git a/llvm/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir b/llvm/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir
index 5bee36d..2cab7e5 100644
--- a/llvm/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir
+++ b/llvm/test/CodeGen/AMDGPU/scalar-store-cache-flush.mir
@@ -56,7 +56,7 @@
 
 body: |
   bb.0:
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
     S_ENDPGM
 ...
 ---
@@ -72,7 +72,7 @@
 
 body: |
   bb.0:
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
     S_DCACHE_WB
     S_ENDPGM
 ...
@@ -91,7 +91,7 @@
 body: |
   bb.0:
     S_DCACHE_WB
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
     S_ENDPGM
 ...
 ---
@@ -122,11 +122,11 @@
 
 body: |
   bb.0:
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
     S_ENDPGM
 
   bb.1:
-    S_STORE_DWORD_SGPR undef %sgpr4, undef %sgpr6_sgpr7, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr4, undef $sgpr6_sgpr7, undef $m0, 0
     S_ENDPGM
 ...
 ...
@@ -152,7 +152,7 @@
     S_ENDPGM
 
   bb.1:
-    S_STORE_DWORD_SGPR undef %sgpr4, undef %sgpr6_sgpr7, undef %m0, 0
+    S_STORE_DWORD_SGPR undef $sgpr4, undef $sgpr6_sgpr7, undef $m0, 0
     S_ENDPGM
 ...
 ---
@@ -168,6 +168,6 @@
 
 body: |
   bb.0:
-    S_STORE_DWORD_SGPR undef %sgpr2, undef %sgpr0_sgpr1, undef %m0, 0
-    SI_RETURN_TO_EPILOG undef %vgpr0
+    S_STORE_DWORD_SGPR undef $sgpr2, undef $sgpr0_sgpr1, undef $m0, 0
+    SI_RETURN_TO_EPILOG undef $vgpr0
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
index bf1fdca..8684ef2 100644
--- a/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
+++ b/llvm/test/CodeGen/AMDGPU/sched-crash-dbg-value.mir
@@ -169,7 +169,7 @@
 ---
 
 # CHECK: name: sched_dbg_value_crash
-# CHECK: DBG_VALUE debug-use %99, debug-use %noreg, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8
+# CHECK: DBG_VALUE debug-use %99, debug-use $noreg, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8
 
 name:            sched_dbg_value_crash
 alignment:       0
@@ -179,11 +179,11 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '%0' }
-  - { reg: '%vgpr1', virtual-reg: '%1' }
-  - { reg: '%vgpr2', virtual-reg: '%2' }
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%3' }
-  - { reg: '%sgpr6_sgpr7', virtual-reg: '%4' }
+  - { reg: '$vgpr0', virtual-reg: '%0' }
+  - { reg: '$vgpr1', virtual-reg: '%1' }
+  - { reg: '$vgpr2', virtual-reg: '%2' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%3' }
+  - { reg: '$sgpr6_sgpr7', virtual-reg: '%4' }
 fixedStack:
 stack:
   - { id: 0, name: tmp5, type: default, offset: 0, size: 128, alignment: 16,
@@ -192,104 +192,104 @@
 constants:
 body:             |
   bb.0.bb:
-    liveins: %vgpr0, %vgpr1, %vgpr2, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr4_sgpr5, %sgpr6_sgpr7, %sgpr32, %sgpr101
+    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr32, $sgpr101
 
-    %4:sgpr_64 = COPY %sgpr6_sgpr7
-    %3:sgpr_64 = COPY %sgpr4_sgpr5
-    %2:vgpr_32 = COPY %vgpr2
-    %1:vgpr_32 = COPY %vgpr1
-    %0:vgpr_32 = COPY %vgpr0
+    %4:sgpr_64 = COPY $sgpr6_sgpr7
+    %3:sgpr_64 = COPY $sgpr4_sgpr5
+    %2:vgpr_32 = COPY $vgpr2
+    %1:vgpr_32 = COPY $vgpr1
+    %0:vgpr_32 = COPY $vgpr0
     %5:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %6:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 8, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %7:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 16, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %8:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 24, 0
     %9:sreg_64_xexec = S_LOAD_DWORDX2_IMM %4, 32, 0
     %10:sreg_64_xexec = S_LOAD_DWORDX2_IMM %3, 4, 0
-    %11:sreg_32_xm0 = S_LSHR_B32 %10.sub0, 16, implicit-def dead %scc
+    %11:sreg_32_xm0 = S_LSHR_B32 %10.sub0, 16, implicit-def dead $scc
     %12:sreg_32_xm0 = S_MUL_I32 %11, %10.sub1
-    %13:vgpr_32 = V_MUL_LO_I32 0, %0, implicit %exec
-    %14:vgpr_32 = V_MUL_LO_I32 %1, %10.sub1, implicit %exec
-    %15:vgpr_32 = V_ADD_I32_e32 0, %13, implicit-def dead %vcc, implicit %exec
-    %16:vgpr_32 = V_ADD_I32_e32 0, %15, implicit-def dead %vcc, implicit %exec
+    %13:vgpr_32 = V_MUL_LO_I32 0, %0, implicit $exec
+    %14:vgpr_32 = V_MUL_LO_I32 %1, %10.sub1, implicit $exec
+    %15:vgpr_32 = V_ADD_I32_e32 0, %13, implicit-def dead $vcc, implicit $exec
+    %16:vgpr_32 = V_ADD_I32_e32 0, %15, implicit-def dead $vcc, implicit $exec
     %17:vgpr_32 = IMPLICIT_DEF
     %18:sreg_64 = S_MOV_B64 0
     %19:sreg_32_xm0_xexec = IMPLICIT_DEF
-    %20:vgpr_32 = V_ADD_I32_e32 %19, %0, implicit-def dead %vcc, implicit %exec
-    %21:vreg_64, dead %22:sreg_64 = V_MAD_I64_I32 %20, 12, %7, 0, implicit %exec
-    %23:vgpr_32 = GLOBAL_LOAD_DWORD %21, 4, 0, 0, implicit %exec
-    %24:vreg_64, dead %25:sreg_64 = V_MAD_I64_I32 %20, 48, %8, 0, implicit %exec
+    %20:vgpr_32 = V_ADD_I32_e32 %19, %0, implicit-def dead $vcc, implicit $exec
+    %21:vreg_64, dead %22:sreg_64 = V_MAD_I64_I32 %20, 12, %7, 0, implicit $exec
+    %23:vgpr_32 = GLOBAL_LOAD_DWORD %21, 4, 0, 0, implicit $exec
+    %24:vreg_64, dead %25:sreg_64 = V_MAD_I64_I32 %20, 48, %8, 0, implicit $exec
     %26:vreg_128 = IMPLICIT_DEF
     undef %27.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %6, 0, 0
     %27.sub1:sreg_64_xexec = S_MOV_B32 0
-    %28:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead %scc
-    undef %29.sub0:sreg_64 = S_ADD_U32 %5.sub0, %28.sub0, implicit-def %scc
-    %29.sub1:sreg_64 = S_ADDC_U32 %5.sub1, %28.sub1, implicit-def dead %scc, implicit killed %scc
+    %28:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead $scc
+    undef %29.sub0:sreg_64 = S_ADD_U32 %5.sub0, %28.sub0, implicit-def $scc
+    %29.sub1:sreg_64 = S_ADDC_U32 %5.sub1, %28.sub1, implicit-def dead $scc, implicit killed $scc
     undef %30.sub0:sreg_64_xexec = S_LOAD_DWORD_IMM %6, 4, 0
     %27.sub0:sreg_64_xexec = IMPLICIT_DEF
-    %31:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead %scc
-    %32:sreg_32_xm0 = S_ADD_U32 0, %31.sub0, implicit-def %scc
-    %33:sgpr_32 = S_ADDC_U32 %5.sub1, %31.sub1, implicit-def dead %scc, implicit killed %scc
+    %31:sreg_64 = S_LSHL_B64 %27, 2, implicit-def dead $scc
+    %32:sreg_32_xm0 = S_ADD_U32 0, %31.sub0, implicit-def $scc
+    %33:sgpr_32 = S_ADDC_U32 %5.sub1, %31.sub1, implicit-def dead $scc, implicit killed $scc
     %34:vgpr_32 = IMPLICIT_DEF
-    %35:vreg_64, dead %36:sreg_64 = V_MAD_I64_I32 %23, %34, 0, 0, implicit %exec
-    %37:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 32, 0, 0, implicit %exec
-    undef %38.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %37.sub0, implicit %exec
+    %35:vreg_64, dead %36:sreg_64 = V_MAD_I64_I32 %23, %34, 0, 0, implicit $exec
+    %37:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 32, 0, 0, implicit $exec
+    undef %38.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %37.sub0, implicit $exec
     %38.sub0:vreg_64 = COPY %37.sub0
-    %39:vreg_64 = V_LSHLREV_B64 3, %38, implicit %exec
-    undef %40.sub0:vreg_64, %41:sreg_64_xexec = V_ADD_I32_e64 0, %39.sub0, implicit %exec
+    %39:vreg_64 = V_LSHLREV_B64 3, %38, implicit $exec
+    undef %40.sub0:vreg_64, %41:sreg_64_xexec = V_ADD_I32_e64 0, %39.sub0, implicit $exec
     %42:vgpr_32 = COPY %33
-    %40.sub1:vreg_64, dead %43:sreg_64_xexec = V_ADDC_U32_e64 %42, %39.sub1, %41, implicit %exec
-    %44:vreg_64 = GLOBAL_LOAD_DWORDX2 %40, 0, 0, 0, implicit %exec :: (load 8 from %ir.tmp34)
+    %40.sub1:vreg_64, dead %43:sreg_64_xexec = V_ADDC_U32_e64 %42, %39.sub1, %41, implicit $exec
+    %44:vreg_64 = GLOBAL_LOAD_DWORDX2 %40, 0, 0, 0, implicit $exec :: (load 8 from %ir.tmp34)
     undef %45.sub1:vreg_64 = IMPLICIT_DEF
     %45.sub0:vreg_64 = COPY %37.sub1
-    %46:vreg_64 = V_LSHLREV_B64 3, %45, implicit %exec
-    undef %47.sub0:vreg_64, %48:sreg_64_xexec = V_ADD_I32_e64 %32, %46.sub0, implicit %exec
+    %46:vreg_64 = V_LSHLREV_B64 3, %45, implicit $exec
+    undef %47.sub0:vreg_64, %48:sreg_64_xexec = V_ADD_I32_e64 %32, %46.sub0, implicit $exec
     %49:vgpr_32 = COPY %33
-    %47.sub1:vreg_64, dead %50:sreg_64_xexec = V_ADDC_U32_e64 %49, %46.sub1, %48, implicit %exec
+    %47.sub1:vreg_64, dead %50:sreg_64_xexec = V_ADDC_U32_e64 %49, %46.sub1, %48, implicit $exec
     %51:vreg_64 = IMPLICIT_DEF
-    undef %52.sub0:vreg_64 = GLOBAL_LOAD_DWORD %35, 40, 0, 0, implicit %exec :: (load 4 from %ir.18 + 8)
+    undef %52.sub0:vreg_64 = GLOBAL_LOAD_DWORD %35, 40, 0, 0, implicit $exec :: (load 4 from %ir.18 + 8)
     %52.sub1:vreg_64 = IMPLICIT_DEF
-    %53:vreg_64 = V_LSHLREV_B64 3, %52, implicit %exec
-    undef %54.sub0:vreg_64, %55:sreg_64_xexec = V_ADD_I32_e64 0, %53.sub0, implicit %exec
+    %53:vreg_64 = V_LSHLREV_B64 3, %52, implicit $exec
+    undef %54.sub0:vreg_64, %55:sreg_64_xexec = V_ADD_I32_e64 0, %53.sub0, implicit $exec
     %56:vgpr_32 = COPY %33
-    %54.sub1:vreg_64, dead %57:sreg_64_xexec = V_ADDC_U32_e64 0, %53.sub1, %55, implicit %exec
+    %54.sub1:vreg_64, dead %57:sreg_64_xexec = V_ADDC_U32_e64 0, %53.sub1, %55, implicit $exec
     %58:vreg_64 = IMPLICIT_DEF
     %30.sub1:sreg_64_xexec = IMPLICIT_DEF
     %59:sreg_64 = IMPLICIT_DEF
-    %60:sreg_32_xm0 = S_ADD_U32 %5.sub0, %59.sub0, implicit-def %scc
-    %61:sgpr_32 = S_ADDC_U32 %5.sub1, %59.sub1, implicit-def dead %scc, implicit killed %scc
-    %62:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 0, 0, 0, implicit %exec :: (load 8 from %ir.20, align 4)
-    undef %63.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %62.sub0, implicit %exec
+    %60:sreg_32_xm0 = S_ADD_U32 %5.sub0, %59.sub0, implicit-def $scc
+    %61:sgpr_32 = S_ADDC_U32 %5.sub1, %59.sub1, implicit-def dead $scc, implicit killed $scc
+    %62:vreg_64 = GLOBAL_LOAD_DWORDX2 %35, 0, 0, 0, implicit $exec :: (load 8 from %ir.20, align 4)
+    undef %63.sub1:vreg_64 = V_ASHRREV_I32_e32 31, %62.sub0, implicit $exec
     %63.sub0:vreg_64 = COPY %62.sub0
     %64:vreg_64 = IMPLICIT_DEF
-    undef %65.sub0:vreg_64, %66:sreg_64_xexec = V_ADD_I32_e64 %60, %64.sub0, implicit %exec
+    undef %65.sub0:vreg_64, %66:sreg_64_xexec = V_ADD_I32_e64 %60, %64.sub0, implicit $exec
     %67:vgpr_32 = COPY %61
-    %65.sub1:vreg_64, dead %68:sreg_64_xexec = V_ADDC_U32_e64 %67, %64.sub1, %66, implicit %exec
-    %69:vreg_128 = GLOBAL_LOAD_DWORDX4 %65, 0, 0, 0, implicit %exec :: (load 16 from %ir.tmp58)
+    %65.sub1:vreg_64, dead %68:sreg_64_xexec = V_ADDC_U32_e64 %67, %64.sub1, %66, implicit $exec
+    %69:vreg_128 = GLOBAL_LOAD_DWORDX4 %65, 0, 0, 0, implicit $exec :: (load 16 from %ir.tmp58)
     undef %70.sub1:vreg_64 = IMPLICIT_DEF
     %70.sub0:vreg_64 = IMPLICIT_DEF
     %71:vreg_64 = IMPLICIT_DEF
-    undef %72.sub0:vreg_64, %73:sreg_64_xexec = V_ADD_I32_e64 %60, %71.sub0, implicit %exec
+    undef %72.sub0:vreg_64, %73:sreg_64_xexec = V_ADD_I32_e64 %60, %71.sub0, implicit $exec
     %74:vgpr_32 = COPY %61
-    %72.sub1:vreg_64, dead %75:sreg_64_xexec = V_ADDC_U32_e64 0, %71.sub1, %73, implicit %exec
-    %76:vreg_128 = GLOBAL_LOAD_DWORDX4 %72, 0, 0, 0, implicit %exec
+    %72.sub1:vreg_64, dead %75:sreg_64_xexec = V_ADDC_U32_e64 0, %71.sub1, %73, implicit $exec
+    %76:vreg_128 = GLOBAL_LOAD_DWORDX4 %72, 0, 0, 0, implicit $exec
     %77:vgpr_32 = IMPLICIT_DEF
     %78:vgpr_32 = IMPLICIT_DEF
-    %79:vgpr_32 = V_MUL_F32_e32 0, %77, implicit %exec
+    %79:vgpr_32 = V_MUL_F32_e32 0, %77, implicit $exec
     %80:vgpr_32 = IMPLICIT_DEF
     %81:vgpr_32 = IMPLICIT_DEF
     %84:vgpr_32 = IMPLICIT_DEF
-    BUFFER_STORE_DWORD_OFFEN %84, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 108, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %81, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 104, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %80, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 100, 0, 0, 0, implicit %exec
-    BUFFER_STORE_DWORD_OFFEN %78, %stack.0.tmp5, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr101, 96, 0, 0, 0, implicit %exec
+    BUFFER_STORE_DWORD_OFFEN %84, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 108, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %81, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 104, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %80, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 100, 0, 0, 0, implicit $exec
+    BUFFER_STORE_DWORD_OFFEN %78, %stack.0.tmp5, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr101, 96, 0, 0, 0, implicit $exec
     %85:vgpr_32 = IMPLICIT_DEF
     %86:vgpr_32 = IMPLICIT_DEF
     %87:vgpr_32 = IMPLICIT_DEF
     %88:vgpr_32 = IMPLICIT_DEF
     %90:vgpr_32 = IMPLICIT_DEF
-    %91:vgpr_32, dead %92:sreg_64 = V_DIV_SCALE_F32 %90, %90, 1065353216, implicit %exec
-    %95:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, undef %93:vgpr_32, 0, 0, implicit %exec
-    %96:vgpr_32, %97:sreg_64 = V_DIV_SCALE_F32 1065353216, %90, 1065353216, implicit %exec
+    %91:vgpr_32, dead %92:sreg_64 = V_DIV_SCALE_F32 %90, %90, 1065353216, implicit $exec
+    %95:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, undef %93:vgpr_32, 0, 0, implicit $exec
+    %96:vgpr_32, %97:sreg_64 = V_DIV_SCALE_F32 1065353216, %90, 1065353216, implicit $exec
     %98:vgpr_32 = IMPLICIT_DEF
     %99:vgpr_32 = IMPLICIT_DEF
     %100:vgpr_32 = IMPLICIT_DEF
@@ -298,18 +298,18 @@
     %103:vgpr_32 = IMPLICIT_DEF
     %104:vgpr_32 = IMPLICIT_DEF
     %105:vgpr_32 = IMPLICIT_DEF
-    %106:vgpr_32, dead %107:sreg_64 = V_DIV_SCALE_F32 %90, %90, %105, implicit %exec
-    %108:vgpr_32 = V_RCP_F32_e32 0, implicit %exec
+    %106:vgpr_32, dead %107:sreg_64 = V_DIV_SCALE_F32 %90, %90, %105, implicit $exec
+    %108:vgpr_32 = V_RCP_F32_e32 0, implicit $exec
     %109:vgpr_32 = IMPLICIT_DEF
-    %110:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, 0, 0, 0, implicit %exec
-    %111:vgpr_32, %112:sreg_64 = V_DIV_SCALE_F32 0, 0, 0, implicit %exec
-    %113:vgpr_32 = V_MUL_F32_e32 0, %110, implicit %exec
+    %110:vgpr_32 = V_FMA_F32 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec
+    %111:vgpr_32, %112:sreg_64 = V_DIV_SCALE_F32 0, 0, 0, implicit $exec
+    %113:vgpr_32 = V_MUL_F32_e32 0, %110, implicit $exec
     %114:vgpr_32 = IMPLICIT_DEF
     %115:vgpr_32 = IMPLICIT_DEF
     %116:vgpr_32 = IMPLICIT_DEF
-    %vcc = IMPLICIT_DEF
-    %117:vgpr_32 = V_DIV_FMAS_F32 0, %116, 0, %110, 0, %115, 0, 0, implicit killed %vcc, implicit %exec
-    %118:vgpr_32 = V_DIV_FIXUP_F32 0, %117, 0, %90, 0, %105, 0, 0, implicit %exec
+    $vcc = IMPLICIT_DEF
+    %117:vgpr_32 = V_DIV_FMAS_F32 0, %116, 0, %110, 0, %115, 0, 0, implicit killed $vcc, implicit $exec
+    %118:vgpr_32 = V_DIV_FIXUP_F32 0, %117, 0, %90, 0, %105, 0, 0, implicit $exec
     %119:vgpr_32 = IMPLICIT_DEF
     %120:vgpr_32 = IMPLICIT_DEF
     %121:vgpr_32 = IMPLICIT_DEF
@@ -319,15 +319,15 @@
     %125:vgpr_32 = IMPLICIT_DEF
     %126:vgpr_32 = IMPLICIT_DEF
     DBG_VALUE debug-use %103, debug-use _, !5, !DIExpression(DW_OP_constu, 1, DW_OP_swap, DW_OP_xderef), debug-location !8
-    ADJCALLSTACKUP 0, 0, implicit-def %sgpr32, implicit %sgpr32
-    %127:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead %scc
-    %sgpr4 = COPY %sgpr101
-    %vgpr0 = COPY %124
-    %vgpr1_vgpr2 = IMPLICIT_DEF
-    %vgpr3 = COPY %126
-    dead %sgpr30_sgpr31 = SI_CALL %127, @func, csr_amdgpu_highregs, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr4, implicit %vgpr0, implicit %vgpr1_vgpr2, implicit killed %vgpr3
-    ADJCALLSTACKDOWN 0, 0, implicit-def %sgpr32, implicit %sgpr32
-    %128:vreg_64, dead %129:sreg_64 = V_MAD_I64_I32 %20, %34, 0, 0, implicit %exec
+    ADJCALLSTACKUP 0, 0, implicit-def $sgpr32, implicit $sgpr32
+    %127:sreg_64 = SI_PC_ADD_REL_OFFSET target-flags(amdgpu-rel32-lo) @func + 4, target-flags(amdgpu-rel32-hi) @func + 4, implicit-def dead $scc
+    $sgpr4 = COPY $sgpr101
+    $vgpr0 = COPY %124
+    $vgpr1_vgpr2 = IMPLICIT_DEF
+    $vgpr3 = COPY %126
+    dead $sgpr30_sgpr31 = SI_CALL %127, @func, csr_amdgpu_highregs, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr4, implicit $vgpr0, implicit $vgpr1_vgpr2, implicit killed $vgpr3
+    ADJCALLSTACKDOWN 0, 0, implicit-def $sgpr32, implicit $sgpr32
+    %128:vreg_64, dead %129:sreg_64 = V_MAD_I64_I32 %20, %34, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir b/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir
index afc2fab..348b8f5 100644
--- a/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir
+++ b/llvm/test/CodeGen/AMDGPU/schedule-regpressure.mir
@@ -4,7 +4,7 @@
 # Check there is no SReg_32 pressure created by DS_* instructions because of M0 use
 
 # CHECK: ScheduleDAGMILive::schedule starting
-# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} implicit %m0, implicit %exec
+# CHECK: SU({{.*}} = DS_READ_B32 {{.*}} implicit $m0, implicit $exec
 # CHECK: Pressure Diff : {{$}}
 # CHECK: SU({{.*}} DS_WRITE_B32
 
@@ -27,7 +27,7 @@
   - { id: 7, class: vgpr_32 }
   - { id: 8, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%1' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -44,14 +44,14 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5
 
-    %1 = COPY %sgpr4_sgpr5
+    %1 = COPY $sgpr4_sgpr5
     %5 = S_LOAD_DWORD_IMM %1, 0, 0 :: (non-temporal dereferenceable invariant load 4 from `i32 addrspace(2)* undef`)
-    %m0 = S_MOV_B32 -1
+    $m0 = S_MOV_B32 -1
     %7 = COPY %5
-    %6 = DS_READ_B32 %7, 0, 0, implicit %m0, implicit %exec
-    DS_WRITE_B32 %7, %6, 4, 0, implicit killed %m0, implicit %exec
+    %6 = DS_READ_B32 %7, 0, 0, implicit $m0, implicit $exec
+    DS_WRITE_B32 %7, %6, 4, 0, implicit killed $m0, implicit $exec
     S_ENDPGM
 
 ...
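
One detail worth noting in schedule-regpressure.mir: M0 appears only as an implicit physical-register operand on the DS instructions, never as a virtual register, so the scheduler's pressure tracker attributes no SReg_32 pressure to it; that is what the empty 'Pressure Diff' check asserts. In post-patch form the pattern under test is (sketch, with the register class written inline):

    $m0 = S_MOV_B32 -1
    %6:vgpr_32 = DS_READ_B32 %7, 0, 0, implicit $m0, implicit $exec
    DS_WRITE_B32 %7, %6, 4, 0, implicit killed $m0, implicit $exec
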
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir b/llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir
index 2196e7e..a5e061d 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-gfx9.mir
@@ -5,13 +5,13 @@
 # GCN-LABEL: {{^}}name: add_shr_i32
 # GCN: [[SMOV:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 123
 
-# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# CI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 [[SMOV]], killed [[SHIFT]], implicit-def %vcc, implicit %exec
+# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# CI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 [[SMOV]], killed [[SHIFT]], implicit-def $vcc, implicit $exec
 
-# VI: [[VMOV:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[SMOV]], implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[VMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def %vcc, implicit %exec
+# VI: [[VMOV:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 [[SMOV]], implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[VMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def $vcc, implicit $exec
 
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[SMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def %vcc, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_sdwa 0, [[SMOV]], 0, %{{[0-9]+}}, 0, 6, 0, 6, 5, implicit-def $vcc, implicit $exec
 
 ---
 name:            add_shr_i32
@@ -32,30 +32,30 @@
   - { id: 12, class: sreg_32_xm0 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
     %12 = S_MOV_B32 123
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_ADD_I32_e32 %12, killed %10, implicit-def %vcc, implicit %exec
-    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_ADD_I32_e32 %12, killed %10, implicit-def $vcc, implicit $exec
+    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
 
 ...
 
 # GCN-LABEL: {{^}}name: trunc_shr_f32
 
-# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# CI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def %vcc, implicit %exec
+# CI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# CI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def $vcc, implicit $exec
 
-# VI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def %vcc, implicit %exec
+# VI: [[SHIFT:%[0-9]+]]:vgpr_32 = V_LSHRREV_B32_e64 16, %{{[0-9]+}}, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_e64 0, killed [[SHIFT]], 1, 2, implicit-def $vcc, implicit $exec
 
-#GFX9: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_sdwa 0, %{{[0-9]+}}, 1, 2, 6, 0, 5, implicit %exec
+#GFX9: %{{[0-9]+}}:vgpr_32 = V_TRUNC_F32_sdwa 0, %{{[0-9]+}}, 1, 2, 6, 0, 5, implicit $exec
 
 ---
 name:            trunc_shr_f32
@@ -75,14 +75,14 @@
   - { id: 11, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_TRUNC_F32_e64 0, killed %10, 1, 2, implicit-def %vcc, implicit %exec
-    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_TRUNC_F32_e64 0, killed %10, 1, 2, implicit-def $vcc, implicit $exec
+    FLAT_STORE_DWORD %0, %11, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
index 0d1534e..7c3e6a9 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-peephole-instr.mir
@@ -3,29 +3,29 @@
 
 # GFX89-LABEL: {{^}}name: vop1_instructions
 
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
 
 
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit %exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_sdwa 0, %{{[0-9]+}}, 0, 6, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 0, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, implicit $exec
 
 
-# VI: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_e64 %{{[0-9]+}}, 0, 1, implicit $exec
 
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_FRACT_F32_sdwa 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SIN_F32_sdwa 0, %{{[0-9]+}}, 1, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_U32_F32_sdwa 1, %{{[0-9]+}}, 0, 5, 0, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_CVT_F32_I32_sdwa 0, %{{[0-9]+}}, 0, 1, 5, 0, 5, implicit $exec
 
 
 ---
@@ -84,105 +84,105 @@
   - { id: 100, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
 
-    %10 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %11 = V_MOV_B32_e32 %10, implicit %exec
-    %12 = V_LSHLREV_B32_e64 16, %11, implicit %exec
-    %14 = V_FRACT_F32_e32 123, implicit %exec
-    %15 = V_LSHLREV_B32_e64 16, %14, implicit %exec
-    %16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
-    %17 = V_SIN_F32_e32 %16, implicit %exec
-    %18 = V_LSHLREV_B32_e64 16, %17, implicit %exec
-    %19 = V_LSHRREV_B32_e64 16, %18, implicit %exec
-    %20 = V_CVT_U32_F32_e32 %19, implicit %exec
-    %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
-    %23 = V_CVT_F32_I32_e32 123, implicit %exec
-    %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
+    %10 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %11 = V_MOV_B32_e32 %10, implicit $exec
+    %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
+    %14 = V_FRACT_F32_e32 123, implicit $exec
+    %15 = V_LSHLREV_B32_e64 16, %14, implicit $exec
+    %16 = V_LSHRREV_B32_e64 16, %15, implicit $exec
+    %17 = V_SIN_F32_e32 %16, implicit $exec
+    %18 = V_LSHLREV_B32_e64 16, %17, implicit $exec
+    %19 = V_LSHRREV_B32_e64 16, %18, implicit $exec
+    %20 = V_CVT_U32_F32_e32 %19, implicit $exec
+    %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
+    %23 = V_CVT_F32_I32_e32 123, implicit $exec
+    %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
 
-    %25 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %26 = V_MOV_B32_e64 %25, implicit %exec
-    %26 = V_LSHLREV_B32_e64 16, %26, implicit %exec
-    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit %exec
-    %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
-    %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
-    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit %exec
-    %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
-    %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
-    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit %exec
-    %34 = V_LSHLREV_B32_e64 16, %33, implicit %exec
-    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit %exec
-    %36 = V_LSHLREV_B32_e64 16, %35, implicit %exec
+    %25 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %26 = V_MOV_B32_e64 %25, implicit $exec
+    %26 = V_LSHLREV_B32_e64 16, %26, implicit $exec
+    %27 = V_FRACT_F32_e64 0, %6, 0, 0, implicit $exec
+    %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
+    %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
+    %30 = V_SIN_F32_e64 0, %29, 0, 0, implicit $exec
+    %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
+    %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
+    %33 = V_CVT_U32_F32_e64 0, %32, 0, 0, implicit $exec
+    %34 = V_LSHLREV_B32_e64 16, %33, implicit $exec
+    %35 = V_CVT_F32_I32_e64 %6, 0, 0, implicit $exec
+    %36 = V_LSHLREV_B32_e64 16, %35, implicit $exec
 
 
-    %37 = V_LSHRREV_B32_e64 16, %36, implicit %exec
-    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit %exec
-    %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
-    %40 = V_LSHRREV_B32_e64 16, %39, implicit %exec
-    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit %exec
-    %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
-    %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
-    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit %exec
-    %45 = V_LSHLREV_B32_e64 16, %44, implicit %exec
-    %46 = V_LSHRREV_B32_e64 16, %45, implicit %exec
-    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit %exec
-    %48 = V_LSHLREV_B32_e64 16, %47, implicit %exec
+    %37 = V_LSHRREV_B32_e64 16, %36, implicit $exec
+    %38 = V_FRACT_F32_e64 1, %37, 0, 0, implicit $exec
+    %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
+    %40 = V_LSHRREV_B32_e64 16, %39, implicit $exec
+    %41 = V_SIN_F32_e64 0, %40, 1, 0, implicit $exec
+    %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
+    %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
+    %44 = V_CVT_U32_F32_e64 1, %43, 0, 0, implicit $exec
+    %45 = V_LSHLREV_B32_e64 16, %44, implicit $exec
+    %46 = V_LSHRREV_B32_e64 16, %45, implicit $exec
+    %47 = V_CVT_F32_I32_e64 %46, 0, 1, implicit $exec
+    %48 = V_LSHLREV_B32_e64 16, %47, implicit $exec
 
 
-    %100 = V_MOV_B32_e32 %48, implicit %exec
+    %100 = V_MOV_B32_e32 %48, implicit $exec
 
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
 
 ...
 ---
 # GCN-LABEL: {{^}}name: vop2_instructions
 
 
-# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
 
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e32 %{{[0-9]+}}, %{{[0-9]+}}, %{{[0-9]+}}, implicit $exec
 
 
-# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, %{{[0-9]+}}, 0, 0, 6, 0, 5, 1, implicit $exec
 
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_AND_B32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 5, 0, 6, 5, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 0, 23, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 0, implicit $exec
 
 
-# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit %exec
-# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, %{{[0-9]+}}, 1, 0, 6, 0, 6, 1, implicit $exec
+# VI: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec
 
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit %exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_ADD_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 5, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_SUB_F16_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 0, 5, 0, 6, 1, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F32_e64 1, 23, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 0, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MAC_F16_e64 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 2, implicit $exec
 
 name:            vop2_instructions
 tracksRegLiveness: true
@@ -251,114 +251,114 @@
   - { id: 100, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
 
-    %11 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %12 = V_AND_B32_e32 %6, %11, implicit %exec
-    %13 = V_LSHLREV_B32_e64 16, %12, implicit %exec
-    %14 = V_LSHRREV_B32_e64 16, %13, implicit %exec
-    %15 = V_BFE_U32 %13, 8, 8, implicit %exec
-    %16 = V_ADD_F32_e32 %14, %15, implicit %exec
-    %17 = V_LSHLREV_B32_e64 16, %16, implicit %exec
-    %18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
-    %19 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %20 = V_SUB_F16_e32 %18, %19, implicit %exec
-    %21 = V_LSHLREV_B32_e64 16, %20, implicit %exec
-    %22 = V_BFE_U32 %20, 8, 8, implicit %exec
-    %23 = V_MAC_F32_e32 %21, %22, %22, implicit %exec
-    %24 = V_LSHLREV_B32_e64 16, %23, implicit %exec
-    %25 = V_LSHRREV_B32_e64 16, %24, implicit %exec
-    %26 = V_BFE_U32 %24, 8, 8, implicit %exec
-    %27 = V_MAC_F16_e32 %25, %26, %26, implicit %exec
-    %28 = V_LSHLREV_B32_e64 16, %27, implicit %exec
+    %11 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %12 = V_AND_B32_e32 %6, %11, implicit $exec
+    %13 = V_LSHLREV_B32_e64 16, %12, implicit $exec
+    %14 = V_LSHRREV_B32_e64 16, %13, implicit $exec
+    %15 = V_BFE_U32 %13, 8, 8, implicit $exec
+    %16 = V_ADD_F32_e32 %14, %15, implicit $exec
+    %17 = V_LSHLREV_B32_e64 16, %16, implicit $exec
+    %18 = V_LSHRREV_B32_e64 16, %17, implicit $exec
+    %19 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %20 = V_SUB_F16_e32 %18, %19, implicit $exec
+    %21 = V_LSHLREV_B32_e64 16, %20, implicit $exec
+    %22 = V_BFE_U32 %20, 8, 8, implicit $exec
+    %23 = V_MAC_F32_e32 %21, %22, %22, implicit $exec
+    %24 = V_LSHLREV_B32_e64 16, %23, implicit $exec
+    %25 = V_LSHRREV_B32_e64 16, %24, implicit $exec
+    %26 = V_BFE_U32 %24, 8, 8, implicit $exec
+    %27 = V_MAC_F16_e32 %25, %26, %26, implicit $exec
+    %28 = V_LSHLREV_B32_e64 16, %27, implicit $exec
 
-    %29 = V_LSHRREV_B32_e64 16, %28, implicit %exec
-    %30 = V_AND_B32_e64 23, %29, implicit %exec
-    %31 = V_LSHLREV_B32_e64 16, %30, implicit %exec
-    %32 = V_LSHRREV_B32_e64 16, %31, implicit %exec
-    %33 = V_BFE_U32 %31, 8, 8, implicit %exec
-    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit %exec
-    %35 = V_LSHLREV_B32_e64 16, %34, implicit %exec
-    %37 = V_BFE_U32 %35, 8, 8, implicit %exec
-    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit %exec
-    %39 = V_LSHLREV_B32_e64 16, %38, implicit %exec
-    %40 = V_BFE_U32 %39, 8, 8, implicit %exec
-    %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit %exec
-    %42 = V_LSHLREV_B32_e64 16, %41, implicit %exec
-    %43 = V_LSHRREV_B32_e64 16, %42, implicit %exec
-    %44 = V_BFE_U32 %42, 8, 8, implicit %exec
-    %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit %exec
-    %46 = V_LSHLREV_B32_e64 16, %45, implicit %exec
+    %29 = V_LSHRREV_B32_e64 16, %28, implicit $exec
+    %30 = V_AND_B32_e64 23, %29, implicit $exec
+    %31 = V_LSHLREV_B32_e64 16, %30, implicit $exec
+    %32 = V_LSHRREV_B32_e64 16, %31, implicit $exec
+    %33 = V_BFE_U32 %31, 8, 8, implicit $exec
+    %34 = V_ADD_F32_e64 0, %32, 0, %33, 0, 0, implicit $exec
+    %35 = V_LSHLREV_B32_e64 16, %34, implicit $exec
+    %37 = V_BFE_U32 %35, 8, 8, implicit $exec
+    %38 = V_SUB_F16_e64 0, 23, 0, %37, 0, 0, implicit $exec
+    %39 = V_LSHLREV_B32_e64 16, %38, implicit $exec
+    %40 = V_BFE_U32 %39, 8, 8, implicit $exec
+    %41 = V_MAC_F32_e64 0, 23, 0, %40, 0, %40, 0, 0, implicit $exec
+    %42 = V_LSHLREV_B32_e64 16, %41, implicit $exec
+    %43 = V_LSHRREV_B32_e64 16, %42, implicit $exec
+    %44 = V_BFE_U32 %42, 8, 8, implicit $exec
+    %45 = V_MAC_F16_e64 0, %43, 0, %44, 0, %44, 0, 0, implicit $exec
+    %46 = V_LSHLREV_B32_e64 16, %45, implicit $exec
 
-    %47 = V_LSHRREV_B32_e64 16, %46, implicit %exec
-    %48 = V_BFE_U32 %46, 8, 8, implicit %exec
-    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit %exec
-    %50 = V_LSHLREV_B32_e64 16, %49, implicit %exec
-    %51 = V_BFE_U32 %50, 8, 8, implicit %exec
-    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit %exec
-    %53 = V_LSHLREV_B32_e64 16, %52, implicit %exec
-    %54 = V_BFE_U32 %53, 8, 8, implicit %exec
-    %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit %exec
-    %56 = V_LSHLREV_B32_e64 16, %55, implicit %exec
-    %57 = V_LSHRREV_B32_e64 16, %56, implicit %exec
-    %58 = V_BFE_U32 %56, 8, 8, implicit %exec
-    %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit %exec
-    %60 = V_LSHLREV_B32_e64 16, %59, implicit %exec
+    %47 = V_LSHRREV_B32_e64 16, %46, implicit $exec
+    %48 = V_BFE_U32 %46, 8, 8, implicit $exec
+    %49 = V_ADD_F32_e64 0, %47, 1, %48, 0, 0, implicit $exec
+    %50 = V_LSHLREV_B32_e64 16, %49, implicit $exec
+    %51 = V_BFE_U32 %50, 8, 8, implicit $exec
+    %52 = V_SUB_F16_e64 1, 23, 1, %51, 0, 0, implicit $exec
+    %53 = V_LSHLREV_B32_e64 16, %52, implicit $exec
+    %54 = V_BFE_U32 %53, 8, 8, implicit $exec
+    %55 = V_MAC_F32_e64 1, 23, 1, %54, 1, %54, 1, 0, implicit $exec
+    %56 = V_LSHLREV_B32_e64 16, %55, implicit $exec
+    %57 = V_LSHRREV_B32_e64 16, %56, implicit $exec
+    %58 = V_BFE_U32 %56, 8, 8, implicit $exec
+    %59 = V_MAC_F16_e64 1, %57, 1, %58, 1, %58, 0, 2, implicit $exec
+    %60 = V_LSHLREV_B32_e64 16, %59, implicit $exec
 
-    %100 = V_MOV_B32_e32 %60, implicit %exec
+    %100 = V_MOV_B32_e32 %60, implicit $exec
 
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
 
 ...
 ---
 
 # GCN-LABEL: {{^}}name: vopc_instructions
 
-# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 123, implicit %exec
-# GFX89: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX89: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX89: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX89: %vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX89: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+# GFX89: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX89: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX89: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX89: $vcc = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
 
 
-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def %exec, implicit %exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_e64 0, 23, 0, killed %{{[0-9]+}}, 0, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %3, 0, 6, 4, implicit-def $vcc, implicit $exec
+# VI: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_e64 23, killed %{{[0-9]+}}, implicit-def $exec, implicit $exec
 
-# GFX9: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit %exec
-# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit %exec
-# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit %exec
-# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# GFX9: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit $exec
+# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMP_LT_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit $exec
+# GFX9: %{{[0-9]+}}:vgpr_32 = V_MOV_B32_e32 23, implicit $exec
+# GFX9: %{{[0-9]+}}:sreg_64 = V_CMPX_EQ_I32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
 
 
-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# VI: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMP_EQ_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# VI: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 1, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
 
-# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def %vcc, implicit-def %exec, implicit %exec
-# GFX9: %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def %exec, implicit %exec
+# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMP_EQ_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 1, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 0, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 0, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_sdwa 1, %{{[0-9]+}}, 1, %{{[0-9]+}}, 0, 6, 4, implicit-def $vcc, implicit-def $exec, implicit $exec
+# GFX9: $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %{{[0-9]+}}, 1, implicit-def $exec, implicit $exec
 
 
 
@@ -396,52 +396,52 @@
   - { id: 100, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
     %5 = S_MOV_B32 65535
     %6 = S_MOV_B32 65535
 
-    %10 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMP_EQ_F32_e32 123, killed %10, implicit-def %vcc, implicit %exec
-    %11 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMPX_GT_F32_e32 123, killed %11, implicit-def %vcc, implicit-def %exec, implicit %exec
-    %12 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMP_LT_I32_e32 123, killed %12, implicit-def %vcc, implicit %exec
-    %13 = V_AND_B32_e64 %5, %3, implicit %exec
-    V_CMPX_EQ_I32_e32 123, killed %13, implicit-def %vcc, implicit-def %exec, implicit %exec
+    %10 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMP_EQ_F32_e32 123, killed %10, implicit-def $vcc, implicit $exec
+    %11 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMPX_GT_F32_e32 123, killed %11, implicit-def $vcc, implicit-def $exec, implicit $exec
+    %12 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMP_LT_I32_e32 123, killed %12, implicit-def $vcc, implicit $exec
+    %13 = V_AND_B32_e64 %5, %3, implicit $exec
+    V_CMPX_EQ_I32_e32 123, killed %13, implicit-def $vcc, implicit-def $exec, implicit $exec
 
-    %14 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit %exec
-    %15 = V_AND_B32_e64 %5, %3, implicit %exec
-    %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def %exec, implicit %exec
-    %16 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit %exec
-    %17 = V_AND_B32_e64 %5, %3, implicit %exec
-    %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def %exec, implicit %exec
+    %14 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %14, 0, implicit $exec
+    %15 = V_AND_B32_e64 %5, %3, implicit $exec
+    %18 = V_CMPX_GT_F32_e64 0, 23, 0, killed %15, 0, implicit-def $exec, implicit $exec
+    %16 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_LT_I32_e64 %6, killed %16, implicit $exec
+    %17 = V_AND_B32_e64 %5, %3, implicit $exec
+    %19 = V_CMPX_EQ_I32_e64 23, killed %17, implicit-def $exec, implicit $exec
 
-    %20 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit %exec
-    %21 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def %exec, implicit %exec
-    %23 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit %exec
-    %24 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def %exec, implicit %exec
-    %25 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def %exec, implicit %exec
-    %26 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def %exec, implicit %exec
-    %27 = V_AND_B32_e64 %5, %3, implicit %exec
-    %vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def %exec, implicit %exec
+    %20 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %20, 1, implicit $exec
+    %21 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 0, 23, 0, killed %21, 0, implicit-def $exec, implicit $exec
+    %23 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMP_EQ_F32_e64 0, %6, 0, killed %23, 1, implicit $exec
+    %24 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 0, killed %24, 0, implicit-def $exec, implicit $exec
+    %25 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 0, 23, 1, killed %25, 0, implicit-def $exec, implicit $exec
+    %26 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %26, 0, implicit-def $exec, implicit $exec
+    %27 = V_AND_B32_e64 %5, %3, implicit $exec
+    $vcc = V_CMPX_GT_F32_e64 1, 23, 1, killed %27, 1, implicit-def $exec, implicit $exec
 
 
-    %100 = V_MOV_B32_e32 %vcc_lo, implicit %exec
+    %100 = V_MOV_B32_e32 $vcc_lo, implicit $exec
 
-    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    FLAT_STORE_DWORD %0, %100, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
index 99a000c..6c480d0 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-preserve.mir
@@ -31,26 +31,26 @@
   - { id: 13, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
   
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
-    %4 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
+    %4 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
-    %5 = V_AND_B32_e32 65535, %3, implicit %exec
-    %6 = V_LSHRREV_B32_e64 16, %4, implicit %exec
-    %7 = V_BFE_U32 %3, 8, 8, implicit %exec
-    %8 = V_LSHRREV_B32_e32 24, %4, implicit %exec
+    %5 = V_AND_B32_e32 65535, %3, implicit $exec
+    %6 = V_LSHRREV_B32_e64 16, %4, implicit $exec
+    %7 = V_BFE_U32 %3, 8, 8, implicit $exec
+    %8 = V_LSHRREV_B32_e32 24, %4, implicit $exec
 
-    %9 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit %exec
-    %10 = V_LSHLREV_B16_e64 8, %9, implicit %exec
-    %11 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit %exec
-    %12 = V_LSHLREV_B32_e64 16, %11, implicit %exec
+    %9 = V_ADD_F16_e64 0, %5, 0, %6, 0, 0, implicit $exec
+    %10 = V_LSHLREV_B16_e64 8, %9, implicit $exec
+    %11 = V_MUL_F32_e64 0, %7, 0, %8, 0, 0, implicit $exec
+    %12 = V_LSHLREV_B32_e64 16, %11, implicit $exec
 
-    %13 = V_OR_B32_e64 %10, %12, implicit %exec
+    %13 = V_OR_B32_e64 %10, %12, implicit $exec
 
-    FLAT_STORE_DWORD %0, %13, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    FLAT_STORE_DWORD %0, %13, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir b/llvm/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir
index 52803ae..58d721c 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-scalar-ops.mir
@@ -183,7 +183,7 @@
   - { id: 82, class: vgpr_32 }
   - { id: 83, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%4' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -200,13 +200,13 @@
 body:             |
   bb.0.bb:
     successors: %bb.2.bb2(0x80000000)
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5
 
-    %4 = COPY %sgpr4_sgpr5
+    %4 = COPY $sgpr4_sgpr5
     %9 = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %8 = S_MOV_B64 0
     %7 = COPY %9
-    %30 = V_MOV_B32_e32 1, implicit %exec
+    %30 = V_MOV_B32_e32 1, implicit $exec
     S_BRANCH %bb.2.bb2
 
   bb.1.bb1:
@@ -217,36 +217,36 @@
 
     %0 = PHI %8, %bb.0.bb, %1, %bb.2.bb2
     %13 = COPY %7.sub1
-    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def %scc
-    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead %scc, implicit %scc
+    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def $scc
+    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead $scc, implicit $scc
     %16 = REG_SEQUENCE %14, 1, %15, 2
     %18 = COPY %16
-    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.uglygep45)
-    %60 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %61 = V_LSHLREV_B32_e32 2, killed %60, implicit %exec
-    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def %vcc, implicit %exec
+    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.uglygep45)
+    %60 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %61 = V_LSHLREV_B32_e32 2, killed %60, implicit $exec
+    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def $vcc, implicit $exec
     %66 = COPY %13
-    %65 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %65 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %67 = REG_SEQUENCE %70, 1, killed %65, 2
-    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp9)
-    %37 = S_ADD_U32 %14, 4, implicit-def %scc
-    %38 = S_ADDC_U32 %15, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp9)
+    %37 = S_ADD_U32 %14, 4, implicit-def $scc
+    %38 = S_ADDC_U32 %15, 0, implicit-def dead $scc, implicit $scc
     %71 = COPY killed %37
     %72 = COPY killed %38
     %41 = REG_SEQUENCE killed %71, 1, killed %72, 2
-    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.scevgep)
-    %73 = V_BFE_U32 %40, 8, 8, implicit %exec
-    %74 = V_LSHLREV_B32_e32 2, killed %73, implicit %exec
-    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def %vcc, implicit %exec
-    %78 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.scevgep)
+    %73 = V_BFE_U32 %40, 8, 8, implicit $exec
+    %74 = V_LSHLREV_B32_e32 2, killed %73, implicit $exec
+    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def $vcc, implicit $exec
+    %78 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %80 = REG_SEQUENCE %83, 1, killed %78, 2
-    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp17)
-    %55 = S_ADD_U32 %0.sub0, 8, implicit-def %scc
-    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp17)
+    %55 = S_ADD_U32 %0.sub0, 8, implicit-def $scc
+    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead $scc, implicit $scc
     %57 = REG_SEQUENCE %55, 1, killed %56, 2
     %1 = COPY %57
-    S_CMPK_EQ_I32 %55, 4096, implicit-def %scc
-    S_CBRANCH_SCC1 %bb.1.bb1, implicit %scc
+    S_CMPK_EQ_I32 %55, 4096, implicit-def $scc
+    S_CBRANCH_SCC1 %bb.1.bb1, implicit $scc
     S_BRANCH %bb.2.bb2
 
 ...
@@ -345,7 +345,7 @@
   - { id: 83, class: vgpr_32 }
   - { id: 84, class: sreg_32_xm0 }
 liveins:
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '%4' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '%4' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -362,13 +362,13 @@
 body:             |
   bb.0.bb:
     successors: %bb.2.bb2(0x80000000)
-    liveins: %sgpr4_sgpr5
+    liveins: $sgpr4_sgpr5
 
-    %4 = COPY %sgpr4_sgpr5
+    %4 = COPY $sgpr4_sgpr5
     %9 = S_LOAD_DWORDX2_IMM %4, 0, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
     %8 = S_MOV_B64 0
     %7 = COPY %9
-    %30 = V_MOV_B32_e32 1, implicit %exec
+    %30 = V_MOV_B32_e32 1, implicit $exec
     %84 = S_MOV_B32 2
     S_BRANCH %bb.2.bb2
 
@@ -380,36 +380,36 @@
 
     %0 = PHI %8, %bb.0.bb, %1, %bb.2.bb2
     %13 = COPY %7.sub1
-    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def %scc
-    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead %scc, implicit %scc
+    %14 = S_ADD_U32 %7.sub0, %0.sub0, implicit-def $scc
+    %15 = S_ADDC_U32 %7.sub1, %0.sub1, implicit-def dead $scc, implicit $scc
     %16 = REG_SEQUENCE %14, 1, %15, 2
     %18 = COPY %16
-    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.uglygep45)
-    %60 = V_BFE_U32 %17, 8, 8, implicit %exec
-    %61 = V_LSHLREV_B32_e32 %84, killed %60, implicit %exec
-    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def %vcc, implicit %exec
+    %17 = FLAT_LOAD_DWORD %18, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.uglygep45)
+    %60 = V_BFE_U32 %17, 8, 8, implicit $exec
+    %61 = V_LSHLREV_B32_e32 %84, killed %60, implicit $exec
+    %70 = V_ADD_I32_e32 %7.sub0, %61, implicit-def $vcc, implicit $exec
     %66 = COPY %13
-    %65 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %65 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %67 = REG_SEQUENCE %70, 1, killed %65, 2
-    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp9)
-    %37 = S_ADD_U32 %14, 4, implicit-def %scc
-    %38 = S_ADDC_U32 %15, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %67, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp9)
+    %37 = S_ADD_U32 %14, 4, implicit-def $scc
+    %38 = S_ADDC_U32 %15, 0, implicit-def dead $scc, implicit $scc
     %71 = COPY killed %37
     %72 = COPY killed %38
     %41 = REG_SEQUENCE killed %71, 1, killed %72, 2
-    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.scevgep)
-    %73 = V_BFE_U32 %40, 8, 8, implicit %exec
-    %74 = V_LSHLREV_B32_e32 %84, killed %73, implicit %exec
-    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def %vcc, implicit %exec
-    %78 = V_ADDC_U32_e32 0, %66, implicit-def %vcc, implicit %vcc, implicit %exec
+    %40 = FLAT_LOAD_DWORD killed %41, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.scevgep)
+    %73 = V_BFE_U32 %40, 8, 8, implicit $exec
+    %74 = V_LSHLREV_B32_e32 %84, killed %73, implicit $exec
+    %83 = V_ADD_I32_e32 %7.sub0, %74, implicit-def $vcc, implicit $exec
+    %78 = V_ADDC_U32_e32 0, %66, implicit-def $vcc, implicit $vcc, implicit $exec
     %80 = REG_SEQUENCE %83, 1, killed %78, 2
-    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4 into %ir.tmp17)
-    %55 = S_ADD_U32 %0.sub0, 8, implicit-def %scc
-    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead %scc, implicit %scc
+    FLAT_STORE_DWORD %80, %30, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4 into %ir.tmp17)
+    %55 = S_ADD_U32 %0.sub0, 8, implicit-def $scc
+    %56 = S_ADDC_U32 %0.sub1, 0, implicit-def dead $scc, implicit $scc
     %57 = REG_SEQUENCE %55, 1, killed %56, 2
     %1 = COPY %57
-    S_CMPK_EQ_I32 %55, 4096, implicit-def %scc
-    S_CBRANCH_SCC1 %bb.1.bb1, implicit %scc
+    S_CMPK_EQ_I32 %55, 4096, implicit-def $scc
+    S_CBRANCH_SCC1 %bb.1.bb1, implicit $scc
     S_BRANCH %bb.2.bb2
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir b/llvm/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
index c50601e..cc621f9 100644
--- a/llvm/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
+++ b/llvm/test/CodeGen/AMDGPU/sdwa-vop2-64bit.mir
@@ -6,10 +6,10 @@
 
 # GCN-LABEL: {{^}}name: vop2_64bit
 
-# GCN: %{{[0-9]+}}:vgpr_32 = V_BCNT_U32_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:vgpr_32 = V_BFM_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:vgpr_32 = V_CVT_PKNORM_I16_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 0, implicit-def %vcc, implicit %exec
-# GCN: %{{[0-9]+}}:sgpr_32 = V_READLANE_B32 killed %{{[0-9]+}}, 0, implicit-def %vcc, implicit %exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_BCNT_U32_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_BFM_B32_e64 %{{[0-9]+}}, killed %{{[0-9]+}}, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:vgpr_32 = V_CVT_PKNORM_I16_F32_e64 0, %{{[0-9]+}}, 0, killed %{{[0-9]+}}, 0, implicit-def $vcc, implicit $exec
+# GCN: %{{[0-9]+}}:sgpr_32 = V_READLANE_B32 killed %{{[0-9]+}}, 0, implicit-def $vcc, implicit $exec
 
 ---
 name:            vop2_64bit
@@ -36,26 +36,26 @@
   - { id: 20, class: vgpr_32 }
 body:             |
   bb.0:
-    liveins: %vgpr0_vgpr1, %vgpr2_vgpr3, %sgpr30_sgpr31
+    liveins: $vgpr0_vgpr1, $vgpr2_vgpr3, $sgpr30_sgpr31
 
-    %2 = COPY %sgpr30_sgpr31
-    %1 = COPY %vgpr2_vgpr3
-    %0 = COPY %vgpr0_vgpr1
-    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4)
+    %2 = COPY $sgpr30_sgpr31
+    %1 = COPY $vgpr2_vgpr3
+    %0 = COPY $vgpr0_vgpr1
+    %3 = FLAT_LOAD_DWORD %1, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4)
 
-    %12 = V_LSHRREV_B32_e64 16, %3, implicit %exec
-    %13 = V_BCNT_U32_B32_e64 %3, killed %12, implicit-def %vcc, implicit %exec
+    %12 = V_LSHRREV_B32_e64 16, %3, implicit $exec
+    %13 = V_BCNT_U32_B32_e64 %3, killed %12, implicit-def $vcc, implicit $exec
 
-    %14 = V_LSHRREV_B32_e64 16, %13, implicit %exec
-    %15 = V_BFM_B32_e64 %13, killed %14, implicit-def %vcc, implicit %exec
+    %14 = V_LSHRREV_B32_e64 16, %13, implicit $exec
+    %15 = V_BFM_B32_e64 %13, killed %14, implicit-def $vcc, implicit $exec
 
-    %16 = V_LSHRREV_B32_e64 16, %15, implicit %exec
-    %17 = V_CVT_PKNORM_I16_F32_e64 0, %15, 0, killed %16, 0, implicit-def %vcc, implicit %exec
+    %16 = V_LSHRREV_B32_e64 16, %15, implicit $exec
+    %17 = V_CVT_PKNORM_I16_F32_e64 0, %15, 0, killed %16, 0, implicit-def $vcc, implicit $exec
 
-    %18 = V_LSHRREV_B32_e64 16, %17, implicit %exec
-    %19 = V_READLANE_B32 killed %18, 0, implicit-def %vcc, implicit %exec
-    %20 = V_MOV_B32_e64 %19, implicit %exec
+    %18 = V_LSHRREV_B32_e64 16, %17, implicit $exec
+    %19 = V_READLANE_B32 killed %18, 0, implicit-def $vcc, implicit $exec
+    %20 = V_MOV_B32_e64 %19, implicit $exec
 
-    FLAT_STORE_DWORD %0, %20, 0, 0, 0, implicit %exec, implicit %flat_scr :: (store 4)
-    %sgpr30_sgpr31 = COPY %2
-    S_SETPC_B64_return %sgpr30_sgpr31
+    FLAT_STORE_DWORD %0, %20, 0, 0, 0, implicit $exec, implicit $flat_scr :: (store 4)
+    $sgpr30_sgpr31 = COPY %2
+    S_SETPC_B64_return $sgpr30_sgpr31
diff --git a/llvm/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir b/llvm/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir
index 5dfd5aa..3da9504 100644
--- a/llvm/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir
+++ b/llvm/test/CodeGen/AMDGPU/sendmsg-m0-hazard.mir
@@ -7,14 +7,14 @@
 name: m0_sendmsg
 body: |
   ; GCN-LABEL: name: m0_sendmsg
-  ; GCN:  %m0 = S_MOV_B32 -1
+  ; GCN:  $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_SENDMSG 3, implicit %exec, implicit %m0
+  ; GCN-NEXT: S_SENDMSG 3, implicit $exec, implicit $m0
 
   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_SENDMSG 3, implicit %exec, implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_SENDMSG 3, implicit $exec, implicit $m0
     S_ENDPGM
 ...
 ---
@@ -22,14 +22,14 @@
 name: m0_sendmsghalt
 body: |
   ; GCN-LABEL: name: m0_sendmsghalt
-  ; GCN:  %m0 = S_MOV_B32 -1
+  ; GCN:  $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_SENDMSGHALT 3, implicit %exec, implicit %m0
+  ; GCN-NEXT: S_SENDMSGHALT 3, implicit $exec, implicit $m0
 
   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_SENDMSGHALT 3, implicit %exec, implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_SENDMSGHALT 3, implicit $exec, implicit $m0
     S_ENDPGM
 ...
 ---
@@ -37,13 +37,13 @@
 name: m0_ttracedata
 body: |
   ; GCN-LABEL: name: m0_ttracedata
-  ; GCN:  %m0 = S_MOV_B32 -1
+  ; GCN:  $m0 = S_MOV_B32 -1
   ; VI-NEXT: S_NOP 0
   ; GFX9-NEXT: S_NOP 0
-  ; GCN-NEXT: S_TTRACEDATA implicit %m0
+  ; GCN-NEXT: S_TTRACEDATA implicit $m0
 
   bb.0:
-    %m0 = S_MOV_B32 -1
-    S_TTRACEDATA implicit %m0
+    $m0 = S_MOV_B32 -1
+    S_TTRACEDATA implicit $m0
     S_ENDPGM
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/shrink-carry.mir b/llvm/test/CodeGen/AMDGPU/shrink-carry.mir
index cf000ff..8a6c8ce 100644
--- a/llvm/test/CodeGen/AMDGPU/shrink-carry.mir
+++ b/llvm/test/CodeGen/AMDGPU/shrink-carry.mir
@@ -1,7 +1,7 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -start-before si-shrink-instructions -stop-before si-insert-skips -o - %s | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: subbrev{{$}}
-# GCN:       V_SUBBREV_U32_e64 0, undef %vgpr0, killed %vcc, implicit %exec
+# GCN:       V_SUBBREV_U32_e64 0, undef $vgpr0, killed $vcc, implicit $exec
 
 ---
 name:            subbrev
@@ -19,13 +19,13 @@
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_SUBBREV_U32_e64 0, %0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_SUBBREV_U32_e64 0, %0, %3, implicit $exec
 
 ...
 
 # GCN-LABEL: name: subb{{$}}
-# GCN:       V_SUBB_U32_e64 undef %vgpr0, 0, killed %vcc, implicit %exec
+# GCN:       V_SUBB_U32_e64 undef $vgpr0, 0, killed $vcc, implicit $exec
 
 ---
 name:            subb
@@ -43,13 +43,13 @@
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_SUBB_U32_e64 %0, 0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_SUBB_U32_e64 %0, 0, %3, implicit $exec
 
 ...
 
 # GCN-LABEL: name: addc{{$}}
-# GCN:       V_ADDC_U32_e32 0, undef %vgpr0, implicit-def %vcc, implicit killed %vcc, implicit %exec
+# GCN:       V_ADDC_U32_e32 0, undef $vgpr0, implicit-def $vcc, implicit killed $vcc, implicit $exec
 
 ---
 name:            addc
@@ -67,13 +67,13 @@
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_ADDC_U32_e64 0, %0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_ADDC_U32_e64 0, %0, %3, implicit $exec
 
 ...
 
 # GCN-LABEL: name: addc2{{$}}
-# GCN:       V_ADDC_U32_e32 0, undef %vgpr0, implicit-def %vcc, implicit killed %vcc, implicit %exec
+# GCN:       V_ADDC_U32_e32 0, undef $vgpr0, implicit-def $vcc, implicit killed $vcc, implicit $exec
 
 ---
 name:            addc2
@@ -91,7 +91,7 @@
     %0 = IMPLICIT_DEF
     %1 = IMPLICIT_DEF
     %2 = IMPLICIT_DEF
-    %3 = V_CMP_GT_U32_e64 %0, %1, implicit %exec
-    %4, %5 = V_ADDC_U32_e64 %0, 0, %3, implicit %exec
+    %3 = V_CMP_GT_U32_e64 %0, %1, implicit $exec
+    %4, %5 = V_ADDC_U32_e64 %0, 0, %3, implicit $exec
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir b/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
index 0ffee0c..cd25fc3 100644
--- a/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
+++ b/llvm/test/CodeGen/AMDGPU/shrink-vop3-carry-out.mir
@@ -8,8 +8,8 @@
 
 ...
 # GCN-LABEL: name: shrink_add_vop3{{$}}
-# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %19, %17, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
+# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_ADD_I32_e64 %19, %17, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
 name:            shrink_add_vop3
 alignment:       0
 exposesReturnsTwice: false
@@ -49,8 +49,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -67,32 +67,32 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
-    %29, %9 = V_ADD_I32_e64 %19, %17, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
+    %29, %9 = V_ADD_I32_e64 %19, %17, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: shrink_sub_vop3{{$}}
-# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_SUB_I32_e64 %19, %17, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
+# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_SUB_I32_e64 %19, %17, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
 
 name:            shrink_sub_vop3
 alignment:       0
@@ -133,8 +133,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -151,32 +151,32 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
-    %29, %9 = V_SUB_I32_e64 %19, %17, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
+    %29, %9 = V_SUB_I32_e64 %19, %17, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: shrink_subrev_vop3{{$}}
-# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_SUBREV_I32_e64 %19, %17, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
+# GCN: %29:vgpr_32, %9:sreg_64_xexec = V_SUBREV_I32_e64 %19, %17, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
 
 name:            shrink_subrev_vop3
 alignment:       0
@@ -217,8 +217,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -235,32 +235,32 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
-    %29, %9 = V_SUBREV_I32_e64 %19, %17, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %29, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
+    %29, %9 = V_SUBREV_I32_e64 %19, %17, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed %9, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %29, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: check_addc_src2_vop3{{$}}
-# GCN: %29:vgpr_32, %vcc = V_ADDC_U32_e64 %19, %17, %9, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
+# GCN: %29:vgpr_32, $vcc = V_ADDC_U32_e64 %19, %17, %9, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec
 name: check_addc_src2_vop3
 alignment:       0
 exposesReturnsTwice: false
@@ -300,8 +300,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -318,33 +318,33 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
     %9 = S_MOV_B64 0
-    %29, %vcc = V_ADDC_U32_e64 %19, %17, %9, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %29, $vcc = V_ADDC_U32_e64 %19, %17, %9, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 ---
 # GCN-LABEL: name: shrink_addc_vop3{{$}}
-# GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def %vcc, implicit %vcc, implicit %exec
-# GCN %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
+# GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def $vcc, implicit $vcc, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec
 
 name:            shrink_addc_vop3
 alignment:       0
@@ -385,8 +385,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -403,34 +403,34 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
-    %vcc = S_MOV_B64 0
-    %29, %vcc = V_ADDC_U32_e64 %19, %17, %vcc, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
+    $vcc = S_MOV_B64 0
+    %29, $vcc = V_ADDC_U32_e64 %19, %17, $vcc, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
 
 ---
 # GCN-LABEL: name: shrink_addc_undef_vcc{{$}}
-# GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def %vcc, implicit undef %vcc, implicit %exec
-# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
+# GCN: %29:vgpr_32 = V_ADDC_U32_e32 %19, %17, implicit-def $vcc, implicit undef $vcc, implicit $exec
+# GCN: %24:vgpr_32 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec
 name:            shrink_addc_undef_vcc
 alignment:       0
 exposesReturnsTwice: false
@@ -470,8 +470,8 @@
   - { id: 28, class: vreg_64 }
   - { id: 29, class: vgpr_32 }
 liveins:
-  - { reg: '%sgpr0_sgpr1', virtual-reg: '%0' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0_sgpr1', virtual-reg: '%0' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -488,25 +488,25 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0:
-    liveins: %sgpr0_sgpr1, %vgpr0
+    liveins: $sgpr0_sgpr1, $vgpr0
 
-    %3 = COPY %vgpr0
-    %0 = COPY %sgpr0_sgpr1
+    %3 = COPY $vgpr0
+    %0 = COPY $sgpr0_sgpr1
     %4 = S_LOAD_DWORDX2_IMM %0, 9, 0
     %5 = S_LOAD_DWORDX2_IMM %0, 11, 0
-    %26 = V_ASHRREV_I32_e32 31, %3, implicit %exec
+    %26 = V_ASHRREV_I32_e32 31, %3, implicit $exec
     %27 = REG_SEQUENCE %3, 1, %26, 2
     %10 = S_MOV_B32 61440
     %11 = S_MOV_B32 0
     %12 = REG_SEQUENCE killed %11, 1, killed %10, 2
     %13 = REG_SEQUENCE killed %5, 17, %12, 18
-    %28 = V_LSHL_B64 killed %27, 2, implicit %exec
+    %28 = V_LSHL_B64 killed %27, 2, implicit $exec
     %16 = REG_SEQUENCE killed %4, 17, %12, 18
-    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit %exec
-    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit %exec
-    %29, %vcc = V_ADDC_U32_e64 %19, %17, undef %vcc, implicit %exec
-    %24 = V_CNDMASK_B32_e64 0, 1, killed %vcc, implicit %exec
-    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit %exec
+    %17 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 0, 0, 0, 0, implicit $exec
+    %19 = BUFFER_LOAD_DWORD_ADDR64 %28, %13, 0, 4, 0, 0, 0, implicit $exec
+    %29, $vcc = V_ADDC_U32_e64 %19, %17, undef $vcc, implicit $exec
+    %24 = V_CNDMASK_B32_e64 0, 1, killed $vcc, implicit $exec
+    BUFFER_STORE_DWORD_ADDR64 %24, %28, killed %16, 0, 0, 0, 0, 0, implicit $exec
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir b/llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
index 18176de..f2c7466 100644
--- a/llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
+++ b/llvm/test/CodeGen/AMDGPU/si-fix-sgpr-copies.mir
@@ -19,21 +19,21 @@
   ; GCN-LABEL: name: phi_visit_order
   ; GCN: V_ADD_I32
   bb.0:
-    liveins: %vgpr0
-    %7 = COPY %vgpr0
+    liveins: $vgpr0
+    %7 = COPY $vgpr0
     %8 = S_MOV_B32 0
 
   bb.1:
     %0 = PHI %8, %bb.0, %0, %bb.1, %2, %bb.2
-    %9 = V_MOV_B32_e32 9, implicit %exec
-    %10 = V_CMP_EQ_U32_e64 %7, %9, implicit %exec
-    %1 = SI_IF %10, %bb.2, implicit-def %exec, implicit-def %scc, implicit %exec
+    %9 = V_MOV_B32_e32 9, implicit $exec
+    %10 = V_CMP_EQ_U32_e64 %7, %9, implicit $exec
+    %1 = SI_IF %10, %bb.2, implicit-def $exec, implicit-def $scc, implicit $exec
     S_BRANCH %bb.1
 
   bb.2:
-    SI_END_CF %1, implicit-def %exec, implicit-def %scc, implicit %exec
+    SI_END_CF %1, implicit-def $exec, implicit-def $scc, implicit $exec
     %11 = S_MOV_B32 1
-    %2 = S_ADD_I32 %0, %11, implicit-def %scc
+    %2 = S_ADD_I32 %0, %11, implicit-def $scc
     S_BRANCH %bb.1
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll b/llvm/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll
index 7ae4636..f3cd607 100644
--- a/llvm/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-instr-info-correct-implicit-operands.ll
@@ -3,7 +3,7 @@
 ; register operands in the correct order when modifying the opcode of an
 ; instruction to V_ADD_I32_e32.
 
-; CHECK: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 %{{[0-9]+}}, %{{[0-9]+}}, implicit-def %vcc, implicit %exec
+; CHECK: %{{[0-9]+}}:vgpr_32 = V_ADD_I32_e32 %{{[0-9]+}}, %{{[0-9]+}}, implicit-def $vcc, implicit $exec
 
 define amdgpu_kernel void @test(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
 entry:
diff --git a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
index 1e9b6b5..422049c 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
+++ b/llvm/test/CodeGen/AMDGPU/spill-empty-live-interval.mir
@@ -7,13 +7,13 @@
 
 # CHECK-LABEL: name: expecting_non_empty_interval
 
-# CHECK: undef %7.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %7.sub1, implicit %exec
-# CHECK-NEXT: SI_SPILL_V64_SAVE %7, %stack.0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr5, 0, implicit %exec :: (store 8 into %stack.0, align 4)
-# CHECK-NEXT: undef %5.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit %exec
-# CHECK-NEXT: dead %2:vgpr_32 = V_MUL_F32_e32 0, %5.sub1, implicit %exec
+# CHECK: undef %7.sub1:vreg_64 = V_MAC_F32_e32 0, undef %1:vgpr_32, undef %7.sub1, implicit $exec
+# CHECK-NEXT: SI_SPILL_V64_SAVE %7, %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec :: (store 8 into %stack.0, align 4)
+# CHECK-NEXT: undef %5.sub1:vreg_64 = V_MOV_B32_e32 1786773504, implicit $exec
+# CHECK-NEXT: dead %2:vgpr_32 = V_MUL_F32_e32 0, %5.sub1, implicit $exec
 
 # CHECK: S_NOP 0, implicit %6.sub1
-# CHECK-NEXT: %8:vreg_64 = SI_SPILL_V64_RESTORE %stack.0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr5, 0, implicit %exec :: (load 8 from %stack.0, align 4)
+# CHECK-NEXT: %8:vreg_64 = SI_SPILL_V64_RESTORE %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec :: (load 8 from %stack.0, align 4)
 # CHECK-NEXT: S_NOP 0, implicit %8.sub1
 # CHECK-NEXT: S_NOP 0, implicit undef %9.sub0
 
@@ -27,9 +27,9 @@
 body:             |
   bb.0:
     successors: %bb.1
-    undef %0.sub1 = V_MAC_F32_e32 0, undef %1, undef %0.sub1, implicit %exec
-    undef %3.sub1 = V_MOV_B32_e32 1786773504, implicit %exec
-    dead %2 = V_MUL_F32_e32 0, %3.sub1, implicit %exec
+    undef %0.sub1 = V_MAC_F32_e32 0, undef %1, undef %0.sub1, implicit $exec
+    undef %3.sub1 = V_MOV_B32_e32 1786773504, implicit $exec
+    dead %2 = V_MUL_F32_e32 0, %3.sub1, implicit $exec
 
   bb.1:
     S_NOP 0, implicit %3.sub1
@@ -44,12 +44,12 @@
 # CHECK-LABEL: name: rematerialize_empty_interval_has_reference
 
 # CHECK-NOT: MOV
-# CHECK: undef %3.sub2:vreg_128 = V_MOV_B32_e32 1786773504, implicit %exec
+# CHECK: undef %3.sub2:vreg_128 = V_MOV_B32_e32 1786773504, implicit $exec
 
 # CHECK: bb.1:
 # CHECK-NEXT: S_NOP 0, implicit %3.sub2
 # CHECK-NEXT: S_NOP 0, implicit undef %6.sub0
-# CHECK-NEXT: undef %4.sub2:vreg_128 = V_MOV_B32_e32 0, implicit %exec
+# CHECK-NEXT: undef %4.sub2:vreg_128 = V_MOV_B32_e32 0, implicit $exec
 # CHECK-NEXT: S_NOP 0, implicit %4.sub2
 name: rematerialize_empty_interval_has_reference
 tracksRegLiveness: true
@@ -62,8 +62,8 @@
   bb.0:
     successors: %bb.1
 
-    undef %0.sub2 = V_MOV_B32_e32 0, implicit %exec
-    undef %3.sub2 = V_MOV_B32_e32 1786773504, implicit %exec
+    undef %0.sub2 = V_MOV_B32_e32 0, implicit $exec
+    undef %3.sub2 = V_MOV_B32_e32 1786773504, implicit $exec
 
   bb.1:
     S_NOP 0, implicit %3.sub2
diff --git a/llvm/test/CodeGen/AMDGPU/splitkit.mir b/llvm/test/CodeGen/AMDGPU/splitkit.mir
index 45a9c41..3f9aeac 100644
--- a/llvm/test/CodeGen/AMDGPU/splitkit.mir
+++ b/llvm/test/CodeGen/AMDGPU/splitkit.mir
@@ -22,7 +22,7 @@
     S_NOP 0, implicit-def %0.sub3 : sreg_128
 
     ; Clobber registers
-    S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11
+    S_NOP 0, implicit-def dead $sgpr0, implicit-def dead $sgpr1, implicit-def dead $sgpr2, implicit-def dead $sgpr3, implicit-def dead $sgpr4, implicit-def dead $sgpr5, implicit-def dead $sgpr6, implicit-def dead $sgpr7, implicit-def dead $sgpr8, implicit-def dead $sgpr9, implicit-def dead $sgpr10, implicit-def dead $sgpr11
 
     S_NOP 0, implicit %0.sub0
     S_NOP 0, implicit %0.sub3
@@ -34,31 +34,31 @@
 # allocated to sgpr0_sgpr1 and the first to something else so we see two copies
 # in between for the two subregisters that are alive.
 # CHECK-LABEL: name: func1
-# CHECK: [[REG0:%sgpr[0-9]+]] = COPY %sgpr0
-# CHECK: [[REG1:%sgpr[0-9]+]] = COPY %sgpr2
+# CHECK: [[REG0:\$sgpr[0-9]+]] = COPY $sgpr0
+# CHECK: [[REG1:\$sgpr[0-9]+]] = COPY $sgpr2
 # CHECK: S_NOP 0
 # CHECK: S_NOP 0, implicit renamable [[REG0]]
 # CHECK: S_NOP 0, implicit renamable [[REG1]]
-# CHECK: %sgpr0 = COPY renamable [[REG0]]
-# CHECK: %sgpr2 = COPY renamable [[REG1]]
+# CHECK: $sgpr0 = COPY renamable [[REG0]]
+# CHECK: $sgpr2 = COPY renamable [[REG1]]
 # CHECK: S_NOP
-# CHECK: S_NOP 0, implicit renamable %sgpr0
-# CHECK: S_NOP 0, implicit renamable %sgpr2
+# CHECK: S_NOP 0, implicit renamable $sgpr0
+# CHECK: S_NOP 0, implicit renamable $sgpr2
 name: func1
 tracksRegLiveness: true
 body: |
   bb.0:
-    liveins: %sgpr0, %sgpr1, %sgpr2
-    undef %0.sub0 : sreg_128 = COPY %sgpr0
-    %0.sub2 = COPY %sgpr2
+    liveins: $sgpr0, $sgpr1, $sgpr2
+    undef %0.sub0 : sreg_128 = COPY $sgpr0
+    %0.sub2 = COPY $sgpr2
 
-    S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1
+    S_NOP 0, implicit-def dead $sgpr0, implicit-def dead $sgpr1
 
     S_NOP 0, implicit %0.sub0
     S_NOP 0, implicit %0.sub2
 
     ; Clobber everything but sgpr0-sgpr3
-    S_NOP 0, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11, implicit-def dead %sgpr12, implicit-def dead %sgpr13, implicit-def dead %sgpr14, implicit-def dead %sgpr15, implicit-def dead %vcc_lo, implicit-def dead %vcc_hi
+    S_NOP 0, implicit-def dead $sgpr4, implicit-def dead $sgpr5, implicit-def dead $sgpr6, implicit-def dead $sgpr7, implicit-def dead $sgpr8, implicit-def dead $sgpr9, implicit-def dead $sgpr10, implicit-def dead $sgpr11, implicit-def dead $sgpr12, implicit-def dead $sgpr13, implicit-def dead $sgpr14, implicit-def dead $sgpr15, implicit-def dead $vcc_lo, implicit-def dead $vcc_hi
 
     S_NOP 0, implicit %0.sub0
     S_NOP 0, implicit %0.sub2
@@ -67,8 +67,8 @@
 # Check that copy hoisting out of loops works. This mainly should not crash the
 # compiler when it hoists a subreg copy sequence.
 # CHECK-LABEL: name: splitHoist
-# CHECK: S_NOP 0, implicit-def renamable %sgpr0
-# CHECK: S_NOP 0, implicit-def renamable %sgpr3
+# CHECK: S_NOP 0, implicit-def renamable $sgpr0
+# CHECK: S_NOP 0, implicit-def renamable $sgpr3
 # CHECK-NEXT: SI_SPILL_S128_SAVE
 name: splitHoist
 tracksRegLiveness: true
@@ -78,7 +78,7 @@
     S_NOP 0, implicit-def undef %0.sub0 : sreg_128
     S_NOP 0, implicit-def %0.sub3 : sreg_128
 
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:
@@ -86,15 +86,15 @@
     S_NOP 0, implicit %0.sub0
 
     ; Clobber registers
-    S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11
+    S_NOP 0, implicit-def dead $sgpr0, implicit-def dead $sgpr1, implicit-def dead $sgpr2, implicit-def dead $sgpr3, implicit-def dead $sgpr4, implicit-def dead $sgpr5, implicit-def dead $sgpr6, implicit-def dead $sgpr7, implicit-def dead $sgpr8, implicit-def dead $sgpr9, implicit-def dead $sgpr10, implicit-def dead $sgpr11
 
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.3
 
   bb.2:
     successors: %bb.3
     ; Clobber registers
-    S_NOP 0, implicit-def dead %sgpr0, implicit-def dead %sgpr1, implicit-def dead %sgpr2, implicit-def dead %sgpr3, implicit-def dead %sgpr4, implicit-def dead %sgpr5, implicit-def dead %sgpr6, implicit-def dead %sgpr7, implicit-def dead %sgpr8, implicit-def dead %sgpr9, implicit-def dead %sgpr10, implicit-def dead %sgpr11
+    S_NOP 0, implicit-def dead $sgpr0, implicit-def dead $sgpr1, implicit-def dead $sgpr2, implicit-def dead $sgpr3, implicit-def dead $sgpr4, implicit-def dead $sgpr5, implicit-def dead $sgpr6, implicit-def dead $sgpr7, implicit-def dead $sgpr8, implicit-def dead $sgpr9, implicit-def dead $sgpr10, implicit-def dead $sgpr11
     S_BRANCH %bb.3
 
   bb.3:
diff --git a/llvm/test/CodeGen/AMDGPU/stack-slot-color-sgpr-vgpr-spills.mir b/llvm/test/CodeGen/AMDGPU/stack-slot-color-sgpr-vgpr-spills.mir
index 8d3b8f2..ed5db1f 100644
--- a/llvm/test/CodeGen/AMDGPU/stack-slot-color-sgpr-vgpr-spills.mir
+++ b/llvm/test/CodeGen/AMDGPU/stack-slot-color-sgpr-vgpr-spills.mir
@@ -9,11 +9,11 @@
 # CHECK: - { id: 1, name: '', type: spill-slot, offset: 0, size: 4, alignment: 4,
 # CHECK-NEXT: stack-id: 1,
 
-# CHECK: SI_SPILL_V32_SAVE killed %vgpr0, %stack.0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr5, 0, implicit %exec :: (store 4 into %stack.0)
-# CHECK: %vgpr0 = SI_SPILL_V32_RESTORE %stack.0, %sgpr0_sgpr1_sgpr2_sgpr3, %sgpr5, 0, implicit %exec :: (load 4 from %stack.0)
+# CHECK: SI_SPILL_V32_SAVE killed $vgpr0, %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec :: (store 4 into %stack.0)
+# CHECK: $vgpr0 = SI_SPILL_V32_RESTORE %stack.0, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec :: (load 4 from %stack.0)
 
-# CHECK: SI_SPILL_S32_SAVE killed renamable %sgpr6, %stack.1, implicit %exec, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr5, implicit-def dead %m0 :: (store 4 into %stack.1)
-# CHECK: %sgpr6 = SI_SPILL_S32_RESTORE %stack.1, implicit %exec, implicit %sgpr0_sgpr1_sgpr2_sgpr3, implicit %sgpr5, implicit-def dead %m0 :: (load 4 from %stack.1)
+# CHECK: SI_SPILL_S32_SAVE killed renamable $sgpr6, %stack.1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr5, implicit-def dead $m0 :: (store 4 into %stack.1)
+# CHECK: $sgpr6 = SI_SPILL_S32_RESTORE %stack.1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3, implicit $sgpr5, implicit-def dead $m0 :: (load 4 from %stack.1)
 
 name: no_merge_sgpr_vgpr_spill_slot
 tracksRegLiveness: true
@@ -25,10 +25,10 @@
 
 body: |
   bb.0:
-    %0 = FLAT_LOAD_DWORD undef %vgpr0_vgpr1, 0, 0, 0, implicit %flat_scr, implicit %exec
-    %2 = FLAT_LOAD_DWORD undef %vgpr0_vgpr1, 0, 0, 0, implicit %flat_scr, implicit %exec
+    %0 = FLAT_LOAD_DWORD undef $vgpr0_vgpr1, 0, 0, 0, implicit $flat_scr, implicit $exec
+    %2 = FLAT_LOAD_DWORD undef $vgpr0_vgpr1, 0, 0, 0, implicit $flat_scr, implicit $exec
     S_NOP 0, implicit %0
-    %1 = S_LOAD_DWORD_IMM undef %sgpr0_sgpr1, 0, 0
-    %3 = S_LOAD_DWORD_IMM undef %sgpr0_sgpr1, 0, 0
+    %1 = S_LOAD_DWORD_IMM undef $sgpr0_sgpr1, 0, 0
+    %3 = S_LOAD_DWORD_IMM undef $sgpr0_sgpr1, 0, 0
     S_NOP 0, implicit %1
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir b/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
index 2d353b8..d798889 100644
--- a/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
+++ b/llvm/test/CodeGen/AMDGPU/subreg-intervals.mir
@@ -31,7 +31,7 @@
   - { id: 0, class: sreg_64 }
 body: |
   bb.0:
-    S_CBRANCH_VCCNZ %bb.1, implicit undef %vcc
+    S_CBRANCH_VCCNZ %bb.1, implicit undef $vcc
     S_BRANCH %bb.2
 
   bb.1:
diff --git a/llvm/test/CodeGen/AMDGPU/subreg_interference.mir b/llvm/test/CodeGen/AMDGPU/subreg_interference.mir
index 3575e41..e8478a8 100644
--- a/llvm/test/CodeGen/AMDGPU/subreg_interference.mir
+++ b/llvm/test/CodeGen/AMDGPU/subreg_interference.mir
@@ -12,12 +12,12 @@
 # sgpr0-sgpr3.
 #
 # CHECK-LABEL: func0
-# CHECK: S_NOP 0, implicit-def renamable %sgpr0
-# CHECK: S_NOP 0, implicit-def renamable %sgpr3
-# CHECK: S_NOP 0, implicit-def renamable %sgpr1
-# CHECK: S_NOP 0, implicit-def renamable %sgpr2
-# CHECK: S_NOP 0, implicit renamable %sgpr0, implicit renamable %sgpr3
-# CHECK: S_NOP 0, implicit renamable %sgpr1, implicit renamable %sgpr2
+# CHECK: S_NOP 0, implicit-def renamable $sgpr0
+# CHECK: S_NOP 0, implicit-def renamable $sgpr3
+# CHECK: S_NOP 0, implicit-def renamable $sgpr1
+# CHECK: S_NOP 0, implicit-def renamable $sgpr2
+# CHECK: S_NOP 0, implicit renamable $sgpr0, implicit renamable $sgpr3
+# CHECK: S_NOP 0, implicit renamable $sgpr1, implicit renamable $sgpr2
 name: func0
 body: |
   bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/syncscopes.ll b/llvm/test/CodeGen/AMDGPU/syncscopes.ll
index 61d9e93e..8600753 100644
--- a/llvm/test/CodeGen/AMDGPU/syncscopes.ll
+++ b/llvm/test/CodeGen/AMDGPU/syncscopes.ll
@@ -1,9 +1,9 @@
 ; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx803 -stop-before=si-debugger-insert-nops < %s | FileCheck --check-prefix=GCN %s
 
 ; GCN-LABEL: name: syncscopes
-; GCN: FLAT_STORE_DWORD killed renamable %vgpr1_vgpr2, killed renamable %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out, addrspace 4)
-; GCN: FLAT_STORE_DWORD killed renamable %vgpr4_vgpr5, killed renamable %vgpr3, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out, addrspace 4)
-; GCN: FLAT_STORE_DWORD killed renamable %vgpr7_vgpr8, killed renamable %vgpr6, 0, 0, 0, implicit %exec, implicit %flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out, addrspace 4)
+; GCN: FLAT_STORE_DWORD killed renamable $vgpr1_vgpr2, killed renamable $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("agent") seq_cst 4 into %ir.agent_out, addrspace 4)
+; GCN: FLAT_STORE_DWORD killed renamable $vgpr4_vgpr5, killed renamable $vgpr3, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("workgroup") seq_cst 4 into %ir.workgroup_out, addrspace 4)
+; GCN: FLAT_STORE_DWORD killed renamable $vgpr7_vgpr8, killed renamable $vgpr6, 0, 0, 0, implicit $exec, implicit $flat_scr :: (volatile store syncscope("wavefront") seq_cst 4 into %ir.wavefront_out, addrspace 4)
 define void @syncscopes(
     i32 %agent,
     i32 addrspace(4)* %agent_out,
diff --git a/llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir b/llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir
index 707676d..f2eb1db 100644
--- a/llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir
+++ b/llvm/test/CodeGen/AMDGPU/twoaddr-mad.mir
@@ -1,7 +1,7 @@
 # RUN: llc -march=amdgcn %s -run-pass twoaddressinstruction -verify-machineinstrs -o - | FileCheck -check-prefix=GCN %s
 
 # GCN-LABEL: name: test_madmk_reg_imm_f32
-# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit %exec
+# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec
 ---
 name:            test_madmk_reg_imm_f32
 registers:
@@ -14,13 +14,13 @@
 
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
-    %2 = V_MOV_B32_e32 1078523331, implicit %exec
-    %3 = V_MAC_F32_e32 killed %0.sub0, %2, killed %1, implicit %exec
+    %2 = V_MOV_B32_e32 1078523331, implicit $exec
+    %3 = V_MAC_F32_e32 killed %0.sub0, %2, killed %1, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madmk_imm_reg_f32
-# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit %exec
+# GCN: V_MADMK_F32 killed %0.sub0, 1078523331, killed %1, implicit $exec
 ---
 name:            test_madmk_imm_reg_f32
 registers:
@@ -33,13 +33,13 @@
 
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
-    %2 = V_MOV_B32_e32 1078523331, implicit %exec
-    %3 = V_MAC_F32_e32 %2, killed %0.sub0, killed %1, implicit %exec
+    %2 = V_MOV_B32_e32 1078523331, implicit $exec
+    %3 = V_MAC_F32_e32 %2, killed %0.sub0, killed %1, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madak_f32
-# GCN: V_MADAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit %exec
+# GCN: V_MADAK_F32 killed %0.sub0, %0.sub1, 1078523331, implicit $exec
 ---
 name:            test_madak_f32
 registers:
@@ -50,13 +50,13 @@
   bb.0:
 
     %0 = IMPLICIT_DEF
-    %1 = V_MOV_B32_e32 1078523331, implicit %exec
-    %2 = V_MAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit %exec
+    %1 = V_MOV_B32_e32 1078523331, implicit $exec
+    %2 = V_MAC_F32_e32 killed %0.sub0, %0.sub1, %1, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madmk_reg_imm_f16
-# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit %exec
+# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec
 ---
 name:            test_madmk_reg_imm_f16
 registers:
@@ -69,13 +69,13 @@
 
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
-    %2 = V_MOV_B32_e32 1078523331, implicit %exec
-    %3 = V_MAC_F16_e32 killed %0.sub0, %2, killed %1, implicit %exec
+    %2 = V_MOV_B32_e32 1078523331, implicit $exec
+    %3 = V_MAC_F16_e32 killed %0.sub0, %2, killed %1, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madmk_imm_reg_f16
-# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit %exec
+# GCN: V_MADMK_F16 killed %0.sub0, 1078523331, killed %1, implicit $exec
 ---
 name:            test_madmk_imm_reg_f16
 registers:
@@ -88,13 +88,13 @@
 
     %0 = IMPLICIT_DEF
     %1 = COPY %0.sub1
-    %2 = V_MOV_B32_e32 1078523331, implicit %exec
-    %3 = V_MAC_F16_e32 %2, killed %0.sub0, killed %1, implicit %exec
+    %2 = V_MOV_B32_e32 1078523331, implicit $exec
+    %3 = V_MAC_F16_e32 %2, killed %0.sub0, killed %1, implicit $exec
 
 ...
 
 # GCN-LABEL: name: test_madak_f16
-# GCN: V_MADAK_F16 killed %0.sub0, %0.sub1, 1078523331, implicit %exec
+# GCN: V_MADAK_F16 killed %0.sub0, %0.sub1, 1078523331, implicit $exec
 ---
 name:            test_madak_f16
 registers:
@@ -105,15 +105,15 @@
   bb.0:
 
     %0 = IMPLICIT_DEF
-    %1 = V_MOV_B32_e32 1078523331, implicit %exec
-    %2 = V_MAC_F16_e32 killed %0.sub0, %0.sub1, %1, implicit %exec
+    %1 = V_MOV_B32_e32 1078523331, implicit $exec
+    %2 = V_MAC_F16_e32 killed %0.sub0, %0.sub1, %1, implicit $exec
 ...
 
 # Make sure constant bus restriction isn't violated if src0 is an SGPR.
 
 # GCN-LABEL: name: test_madak_sgpr_src0_f32
-# GCN: %1:vgpr_32 = V_MOV_B32_e32 1078523331, implicit %exec
-# GCN: %2:vgpr_32 = V_MAD_F32 0, killed %0, 0, %1, 0, %3:vgpr_32, 0, 0, implicit %exec
+# GCN: %1:vgpr_32 = V_MOV_B32_e32 1078523331, implicit $exec
+# GCN: %2:vgpr_32 = V_MAD_F32 0, killed %0, 0, %1, 0, %3:vgpr_32, 0, 0, implicit $exec
 
 ---
 name:            test_madak_sgpr_src0_f32
@@ -126,15 +126,15 @@
   bb.0:
 
     %0 = IMPLICIT_DEF
-    %1 = V_MOV_B32_e32 1078523331, implicit %exec
-    %2 = V_MAC_F32_e32 killed %0, %1, %3, implicit %exec
+    %1 = V_MOV_B32_e32 1078523331, implicit $exec
+    %2 = V_MAC_F32_e32 killed %0, %1, %3, implicit $exec
 
 ...
 
 # This can still fold if this is an inline immediate.
 
 # GCN-LABEL: name: test_madak_inlineimm_src0_f32
-# GCN: %1:vgpr_32 = V_MADMK_F32 1073741824, 1078523331, %2:vgpr_32, implicit %exec
+# GCN: %1:vgpr_32 = V_MADMK_F32 1073741824, 1078523331, %2:vgpr_32, implicit $exec
 
 ---
 name:            test_madak_inlineimm_src0_f32
@@ -145,14 +145,14 @@
 body:             |
   bb.0:
 
-    %0 = V_MOV_B32_e32 1078523331, implicit %exec
-    %1 = V_MAC_F32_e32 1073741824, %0, %2, implicit %exec
+    %0 = V_MOV_B32_e32 1078523331, implicit $exec
+    %1 = V_MAC_F32_e32 1073741824, %0, %2, implicit $exec
 
 ...
 # Non-inline immediate uses constant bus already.
 
 # GCN-LABEL: name: test_madak_otherimm_src0_f32
-# GCN: %1:vgpr_32 = V_MAC_F32_e32 1120403456, %0, %1, implicit %exec
+# GCN: %1:vgpr_32 = V_MAC_F32_e32 1120403456, %0, %1, implicit $exec
 
 ---
 name:            test_madak_otherimm_src0_f32
@@ -163,14 +163,14 @@
 body:             |
   bb.0:
 
-    %0 = V_MOV_B32_e32 1078523331, implicit %exec
-    %1 = V_MAC_F32_e32 1120403456, %0, %2, implicit %exec
+    %0 = V_MOV_B32_e32 1078523331, implicit $exec
+    %1 = V_MAC_F32_e32 1120403456, %0, %2, implicit $exec
 
 ...
 # Non-inline immediate uses constant bus already.
 
 # GCN-LABEL: name: test_madak_other_constantlike_src0_f32
-# GCN: %1:vgpr_32 = V_MAC_F32_e32 %stack.0, %0, %1, implicit %exec
+# GCN: %1:vgpr_32 = V_MAC_F32_e32 %stack.0, %0, %1, implicit $exec
 ---
 name:            test_madak_other_constantlike_src0_f32
 registers:
@@ -184,7 +184,7 @@
 body:             |
   bb.0:
 
-    %0 = V_MOV_B32_e32 1078523331, implicit %exec
-    %1 = V_MAC_F32_e32 %stack.0, %0, %2, implicit %exec
+    %0 = V_MOV_B32_e32 1078523331, implicit $exec
+    %1 = V_MAC_F32_e32 %stack.0, %0, %2, implicit $exec
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir b/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
index 7c445b7..8b74e02 100644
--- a/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/undefined-physreg-sgpr-spill.mir
@@ -17,10 +17,10 @@
 # leaving a spill of the undefined register.
 
 # CHECK-LABEL: name: undefined_physreg_sgpr_spill
-# CHECK: %sgpr0_sgpr1 = COPY %exec, implicit-def %exec
-# CHECK-NEXT: SI_SPILL_S64_SAVE %sgpr0_sgpr1,
-# CHECK-NEXT: %sgpr2_sgpr3 = S_AND_B64 killed %sgpr0_sgpr1, killed %vcc, implicit-def dead %scc
-# CHECK: %exec = COPY killed %sgpr2_sgpr3
+# CHECK: $sgpr0_sgpr1 = COPY $exec, implicit-def $exec
+# CHECK-NEXT: SI_SPILL_S64_SAVE $sgpr0_sgpr1,
+# CHECK-NEXT: $sgpr2_sgpr3 = S_AND_B64 killed $sgpr0_sgpr1, killed $vcc, implicit-def dead $scc
+# CHECK: $exec = COPY killed $sgpr2_sgpr3
 name:            undefined_physreg_sgpr_spill
 alignment:       0
 exposesReturnsTwice: false
@@ -30,8 +30,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '' }
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '' }
+  - { reg: '$vgpr0', virtual-reg: '' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '' }
 stack:
   - { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4,
       stack-id: 1, callee-saved-register: '', callee-saved-restored: true,
@@ -40,39 +40,39 @@
 body:             |
   bb.0:
     successors: %bb.1, %bb.2
-    liveins: %vgpr0, %sgpr4_sgpr5
+    liveins: $vgpr0, $sgpr4_sgpr5
 
-    %vgpr1_vgpr2 = COPY killed %sgpr4_sgpr5, implicit %exec
-    %vgpr1 = GLOBAL_LOAD_UBYTE killed %vgpr1_vgpr2, 0, 0, 0, implicit %exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(2)* undef`)
-    %vcc = V_CMP_NE_U32_e64 0, %vgpr0, implicit %exec
-    %sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed %vgpr1, implicit %exec
-    %vgpr1 = V_CNDMASK_B32_e64 0, -1, killed %sgpr0_sgpr1, implicit %exec
-    %sgpr0_sgpr1 = COPY %exec, implicit-def %exec
-    SI_SPILL_S64_SAVE %sgpr0_sgpr1, %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (store 8 into %stack.0, align 4)
-    %sgpr2_sgpr3 = S_AND_B64 killed %sgpr0_sgpr1, killed %vcc, implicit-def dead %scc
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec
+    $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(2)* undef`)
+    $vcc = V_CMP_NE_U32_e64 0, $vgpr0, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed $vgpr1, implicit $exec
+    $vgpr1 = V_CNDMASK_B32_e64 0, -1, killed $sgpr0_sgpr1, implicit $exec
+    $sgpr0_sgpr1 = COPY $exec, implicit-def $exec
+    SI_SPILL_S64_SAVE $sgpr0_sgpr1, %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (store 8 into %stack.0, align 4)
+    $sgpr2_sgpr3 = S_AND_B64 killed $sgpr0_sgpr1, killed $vcc, implicit-def dead $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
     successors: %bb.3(0x80000000)
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
-    %sgpr2_sgpr3 = S_MOV_B64 0
-    %vgpr2 = V_MOV_B32_e32 0, implicit %exec
-    %sgpr4_sgpr5 = IMPLICIT_DEF
+    $sgpr2_sgpr3 = S_MOV_B64 0
+    $vgpr2 = V_MOV_B32_e32 0, implicit $exec
+    $sgpr4_sgpr5 = IMPLICIT_DEF
     S_BRANCH %bb.3
 
   bb.2:
     successors:
 
-    %sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (load 8 from %stack.0, align 4)
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
+    $sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (load 8 from %stack.0, align 4)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
 
   bb.3:
-    liveins: %vgpr0, %vgpr1, %vgpr2, %sgpr2_sgpr3, %sgpr4_sgpr5
+    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr2_sgpr3, $sgpr4_sgpr5
 
-    %vcc = COPY %vgpr1
+    $vcc = COPY $vgpr1
     S_ENDPGM
 
 ...
@@ -80,10 +80,10 @@
 
 # Move spill to after future save instruction
 # CHECK-LABEL: {{^}}name: undefined_physreg_sgpr_spill_reorder
-# CHECK: %sgpr0_sgpr1 = COPY %exec, implicit-def %exec
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def dead %scc
-# CHECK: SI_SPILL_S64_SAVE killed %sgpr0_sgpr1, %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (store 8 into %stack.0, align 4)
-# CHECK: %exec = COPY killed %sgpr2_sgpr3
+# CHECK: $sgpr0_sgpr1 = COPY $exec, implicit-def $exec
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def dead $scc
+# CHECK: SI_SPILL_S64_SAVE killed $sgpr0_sgpr1, %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (store 8 into %stack.0, align 4)
+# CHECK: $exec = COPY killed $sgpr2_sgpr3
 name:            undefined_physreg_sgpr_spill_reorder
 alignment:       0
 exposesReturnsTwice: false
@@ -93,8 +93,8 @@
 tracksRegLiveness: true
 registers:
 liveins:
-  - { reg: '%vgpr0', virtual-reg: '' }
-  - { reg: '%sgpr4_sgpr5', virtual-reg: '' }
+  - { reg: '$vgpr0', virtual-reg: '' }
+  - { reg: '$sgpr4_sgpr5', virtual-reg: '' }
 stack:
   - { id: 0, name: '', type: spill-slot, offset: 0, size: 8, alignment: 4,
       stack-id: 1, callee-saved-register: '', callee-saved-restored: true,
@@ -103,39 +103,39 @@
 body:             |
   bb.0:
     successors: %bb.1, %bb.2
-    liveins: %vgpr0, %sgpr4_sgpr5
+    liveins: $vgpr0, $sgpr4_sgpr5
 
-    %vgpr1_vgpr2 = COPY killed %sgpr4_sgpr5, implicit %exec
-    %vgpr1 = GLOBAL_LOAD_UBYTE killed %vgpr1_vgpr2, 0, 0, 0, implicit %exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(2)* undef`)
-    %vcc = V_CMP_NE_U32_e64 0, %vgpr0, implicit %exec
-    %sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed %vgpr1, implicit %exec
-    %vgpr1 = V_CNDMASK_B32_e64 0, -1, killed %sgpr0_sgpr1, implicit %exec
-    %sgpr0_sgpr1 = COPY %exec, implicit-def %exec
-    %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def dead %scc
-    SI_SPILL_S64_SAVE killed %sgpr0_sgpr1, %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (store 8 into %stack.0, align 4)
-    %exec = S_MOV_B64_term killed %sgpr2_sgpr3
-    SI_MASK_BRANCH %bb.2, implicit %exec
+    $vgpr1_vgpr2 = COPY killed $sgpr4_sgpr5, implicit $exec
+    $vgpr1 = GLOBAL_LOAD_UBYTE killed $vgpr1_vgpr2, 0, 0, 0, implicit $exec :: (non-temporal dereferenceable invariant load 1 from `i1 addrspace(2)* undef`)
+    $vcc = V_CMP_NE_U32_e64 0, $vgpr0, implicit $exec
+    $sgpr0_sgpr1 = V_CMP_EQ_U32_e64 1, killed $vgpr1, implicit $exec
+    $vgpr1 = V_CNDMASK_B32_e64 0, -1, killed $sgpr0_sgpr1, implicit $exec
+    $sgpr0_sgpr1 = COPY $exec, implicit-def $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def dead $scc
+    SI_SPILL_S64_SAVE killed $sgpr0_sgpr1, %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (store 8 into %stack.0, align 4)
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
     successors: %bb.3(0x80000000)
-    liveins: %vgpr0, %vgpr1
+    liveins: $vgpr0, $vgpr1
 
-    %sgpr2_sgpr3 = S_MOV_B64 0
-    %vgpr2 = V_MOV_B32_e32 0, implicit %exec
-    %sgpr4_sgpr5 = IMPLICIT_DEF
+    $sgpr2_sgpr3 = S_MOV_B64 0
+    $vgpr2 = V_MOV_B32_e32 0, implicit $exec
+    $sgpr4_sgpr5 = IMPLICIT_DEF
     S_BRANCH %bb.3
 
   bb.2:
     successors:
 
-    %sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit %exec, implicit %sgpr8_sgpr9_sgpr10_sgpr11, implicit %sgpr13, implicit-def dead %m0 :: (load 8 from %stack.0, align 4)
-    %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
+    $sgpr0_sgpr1 = SI_SPILL_S64_RESTORE %stack.0, implicit $exec, implicit $sgpr8_sgpr9_sgpr10_sgpr11, implicit $sgpr13, implicit-def dead $m0 :: (load 8 from %stack.0, align 4)
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
 
   bb.3:
-    liveins: %vgpr0, %vgpr1, %vgpr2, %sgpr2_sgpr3, %sgpr4_sgpr5
+    liveins: $vgpr0, $vgpr1, $vgpr2, $sgpr2_sgpr3, $sgpr4_sgpr5
 
-    %vcc = COPY %vgpr1
+    $vcc = COPY $vgpr1
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir b/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
index ff9826b..9a6f68c 100644
--- a/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
+++ b/llvm/test/CodeGen/AMDGPU/vccz-corrupt-bug-workaround.mir
@@ -46,9 +46,9 @@
 ...
 ---
 # CHECK-LABEL: name: vccz_corrupt_workaround
-# CHECK: %vcc = V_CMP_EQ_F32
-# CHECK-NEXT: %vcc = S_MOV_B64 %vcc
-# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit killed %vcc
+# CHECK: $vcc = V_CMP_EQ_F32
+# CHECK-NEXT: $vcc = S_MOV_B64 $vcc
+# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit killed $vcc
 
 name:            vccz_corrupt_workaround
 alignment:       0
@@ -58,7 +58,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
+  - { reg: '$sgpr0_sgpr1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -75,43 +75,43 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.entry:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr2 = S_LOAD_DWORD_IMM %sgpr0_sgpr1, 9, 0 :: (non-temporal dereferenceable invariant load 4 from `float addrspace(2)* undef`)
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    %vcc = V_CMP_EQ_F32_e64 0, 0, 0, %sgpr2, 0, implicit %exec
-    S_CBRANCH_VCCZ %bb.1, implicit killed %vcc
+    $sgpr2 = S_LOAD_DWORD_IMM $sgpr0_sgpr1, 9, 0 :: (non-temporal dereferenceable invariant load 4 from `float addrspace(2)* undef`)
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vcc = V_CMP_EQ_F32_e64 0, 0, 0, $sgpr2, 0, implicit $exec
+    S_CBRANCH_VCCZ %bb.1, implicit killed $vcc
 
   bb.2.if:
-    liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %vgpr0 = V_MOV_B32_e32 9, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
-    %vgpr0 = V_MOV_B32_e32 0, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 9, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
     S_BRANCH %bb.3
 
   bb.1.else:
-    liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %vgpr0 = V_MOV_B32_e32 100, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 100, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
 
   bb.3.done:
-    liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out)
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
 ---
 # CHECK-LABEL: name: vccz_corrupt_undef_vcc
 # CHECK: S_WAITCNT
-# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef %vcc
+# CHECK-NEXT: S_CBRANCH_VCCZ %bb.2, implicit undef $vcc
 
 name:            vccz_corrupt_undef_vcc
 alignment:       0
@@ -121,7 +121,7 @@
 selected:        false
 tracksRegLiveness: true
 liveins:
-  - { reg: '%sgpr0_sgpr1' }
+  - { reg: '$sgpr0_sgpr1' }
 frameInfo:
   isFrameAddressTaken: false
   isReturnAddressTaken: false
@@ -138,34 +138,34 @@
   hasMustTailInVarArgFunc: false
 body:             |
   bb.0.entry:
-    liveins: %sgpr0_sgpr1
+    liveins: $sgpr0_sgpr1
 
-    %sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed %sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
-    %sgpr7 = S_MOV_B32 61440
-    %sgpr6 = S_MOV_B32 -1
-    S_CBRANCH_VCCZ %bb.1, implicit undef %vcc
+    $sgpr0_sgpr1 = S_LOAD_DWORDX2_IMM killed $sgpr0_sgpr1, 11, 0 :: (non-temporal dereferenceable invariant load 8 from `i64 addrspace(2)* undef`)
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    S_CBRANCH_VCCZ %bb.1, implicit undef $vcc
 
   bb.2.if:
-    liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %vgpr0 = V_MOV_B32_e32 9, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
-    %vgpr0 = V_MOV_B32_e32 0, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 9, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
+    $vgpr0 = V_MOV_B32_e32 0, implicit $exec
     S_BRANCH %bb.3
 
   bb.1.else:
-    liveins: %sgpr6, %sgpr7, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $sgpr6, $sgpr7, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %vgpr0 = V_MOV_B32_e32 100, implicit %exec
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
-    %vgpr0 = V_MOV_B32_e32 1, implicit %exec
+    $vgpr0 = V_MOV_B32_e32 100, implicit $exec
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile store 4 into `i32 addrspace(1)* undef`)
+    $vgpr0 = V_MOV_B32_e32 1, implicit $exec
 
   bb.3.done:
-    liveins: %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
+    liveins: $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3:0x00000003
 
-    %sgpr3 = S_MOV_B32 61440
-    %sgpr2 = S_MOV_B32 -1
-    BUFFER_STORE_DWORD_OFFSET killed %vgpr0, killed %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into %ir.out)
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, killed $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into %ir.out)
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/vop-shrink-frame-index.mir b/llvm/test/CodeGen/AMDGPU/vop-shrink-frame-index.mir
index 1c34789..a635bd8 100644
--- a/llvm/test/CodeGen/AMDGPU/vop-shrink-frame-index.mir
+++ b/llvm/test/CodeGen/AMDGPU/vop-shrink-frame-index.mir
@@ -35,7 +35,7 @@
 # GCN-LABEL: name: fold_fi_vgpr{{$}}
 # GCN: %1:vgpr_32 = IMPLICIT_DEF
 
-# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec
+# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def $vcc, implicit $exec
 name: fold_fi_vgpr
 tracksRegLiveness: true
 registers:
@@ -48,15 +48,15 @@
       di-location: '' }
 body:             |
   bb.0:
-    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
     %1 = IMPLICIT_DEF
-    %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec
+    %2, $vcc = V_ADD_I32_e64 %0, %1, implicit $exec
     S_ENDPGM
 
 ...
 # GCN-LABEL: name: fold_vgpr_fi{{$}}
 # GCN: %1:vgpr_32 = IMPLICIT_DEF
-# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec
+# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def $vcc, implicit $exec
 name: fold_vgpr_fi
 tracksRegLiveness: true
 registers:
@@ -69,16 +69,16 @@
       di-location: '' }
 body:             |
   bb.0:
-    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
     %1 = IMPLICIT_DEF
-    %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec
+    %2, $vcc = V_ADD_I32_e64 %1, %0, implicit $exec
     S_ENDPGM
 
 ...
 # GCN-LABEL: name: fold_sgpr_fi{{$}}
-# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
 # GCN: %1:sgpr_32 = IMPLICIT_DEF
-# GCN: %2:vgpr_32 = V_ADD_I32_e32 %1, %0, implicit-def %vcc, implicit %exec
+# GCN: %2:vgpr_32 = V_ADD_I32_e32 %1, %0, implicit-def $vcc, implicit $exec
 name: fold_sgpr_fi
 tracksRegLiveness: true
 registers:
@@ -91,16 +91,16 @@
       di-location: '' }
 body:             |
   bb.0:
-    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
     %1 = IMPLICIT_DEF
-    %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec
+    %2, $vcc = V_ADD_I32_e64 %1, %0, implicit $exec
     S_ENDPGM
 
 ...
 # GCN-LABEL: name: fold_fi_sgpr{{$}}
-# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
 # GCN: %1:sgpr_32 = IMPLICIT_DEF
-# GCN: %2:vgpr_32 = V_ADD_I32_e32 %1, %0, implicit-def %vcc, implicit %exec
+# GCN: %2:vgpr_32 = V_ADD_I32_e32 %1, %0, implicit-def $vcc, implicit $exec
 name: fold_fi_sgpr
 tracksRegLiveness: true
 registers:
@@ -113,15 +113,15 @@
       di-location: '' }
 body:             |
   bb.0:
-    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
+    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
     %1 = IMPLICIT_DEF
-    %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec
+    %2, $vcc = V_ADD_I32_e64 %0, %1, implicit $exec
     S_ENDPGM
 ...
 # TODO: Should probably prefer folding immediate first
 # GCN-LABEL: name: fold_fi_imm{{$}}
-# GCN: %1:vgpr_32 = V_MOV_B32_e32 999, implicit %exec
-# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def %vcc, implicit %exec
+# GCN: %1:vgpr_32 = V_MOV_B32_e32 999, implicit $exec
+# GCN: %2:vgpr_32 = V_ADD_I32_e32 %stack.0.alloca, %1, implicit-def $vcc, implicit $exec
 name: fold_fi_imm
 tracksRegLiveness: true
 registers:
@@ -134,15 +134,15 @@
       di-location: '' }
 body:             |
   bb.0:
-    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
-    %1 = V_MOV_B32_e32 999, implicit %exec
-    %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec
+    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
+    %1 = V_MOV_B32_e32 999, implicit $exec
+    %2, $vcc = V_ADD_I32_e64 %0, %1, implicit $exec
     S_ENDPGM
 
 ...
 # GCN-LABEL: name: fold_imm_fi{{$}}
-# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
-# GCN: %2:vgpr_32 = V_ADD_I32_e32 999, %0, implicit-def %vcc, implicit %exec
+# GCN: %0:vgpr_32 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
+# GCN: %2:vgpr_32 = V_ADD_I32_e32 999, %0, implicit-def $vcc, implicit $exec
 name: fold_imm_fi
 tracksRegLiveness: true
 registers:
@@ -155,7 +155,7 @@
       di-location: '' }
 body:             |
   bb.0:
-    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit %exec
-    %1 = V_MOV_B32_e32 999, implicit %exec
-    %2, %vcc = V_ADD_I32_e64 %1, %0, implicit %exec
+    %0 = V_MOV_B32_e32 %stack.0.alloca, implicit $exec
+    %1 = V_MOV_B32_e32 999, implicit $exec
+    %2, $vcc = V_ADD_I32_e64 %1, %0, implicit $exec
     S_ENDPGM
diff --git a/llvm/test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir b/llvm/test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir
index a190324..fa1d520 100644
--- a/llvm/test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir
+++ b/llvm/test/CodeGen/AMDGPU/vop-shrink-non-ssa.mir
@@ -1,8 +1,8 @@
 # RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-shrink-instructions -o - %s | FileCheck -check-prefix=GCN %s
 ...
 # GCN-LABEL: name: fold_imm_non_ssa{{$}}
-# GCN: %0:vgpr_32 = V_MOV_B32_e32 123, implicit %exec
-# GCN: %2:vgpr_32 = V_ADD_I32_e32 456, %0, implicit-def %vcc, implicit %exec
+# GCN: %0:vgpr_32 = V_MOV_B32_e32 123, implicit $exec
+# GCN: %2:vgpr_32 = V_ADD_I32_e32 456, %0, implicit-def $vcc, implicit $exec
 
 name: fold_imm_non_ssa
 tracksRegLiveness: true
@@ -14,15 +14,15 @@
 body:             |
   bb.0:
     %0 = COPY undef %0
-    %0 = V_MOV_B32_e32 123, implicit %exec
-    %1 = V_MOV_B32_e32 456, implicit %exec
-    %2, %vcc = V_ADD_I32_e64 %0, %1, implicit %exec
+    %0 = V_MOV_B32_e32 123, implicit $exec
+    %1 = V_MOV_B32_e32 456, implicit $exec
+    %2, $vcc = V_ADD_I32_e64 %0, %1, implicit $exec
     S_ENDPGM
 
 ...
 # GCN-LABEL: name: fold_partially_defined_superreg{{$}}
-# GCN: %1:vgpr_32 = V_MOV_B32_e32 456, implicit %exec
-# GCN: %2:vgpr_32 = V_ADD_I32_e32 123, %1, implicit-def %vcc, implicit %exec
+# GCN: %1:vgpr_32 = V_MOV_B32_e32 456, implicit $exec
+# GCN: %2:vgpr_32 = V_ADD_I32_e32 123, %1, implicit-def $vcc, implicit $exec
 name: fold_partially_defined_superreg
 tracksRegLiveness: true
 registers:
@@ -32,9 +32,9 @@
   - { id: 3, class: vreg_64 }
 body:             |
   bb.0:
-    undef %3.sub0 = V_MOV_B32_e32 123, implicit %exec, implicit-def %3
-    %1 = V_MOV_B32_e32 456, implicit %exec
-    %2, %vcc = V_ADD_I32_e64 %3.sub0, %1, implicit %exec
+    undef %3.sub0 = V_MOV_B32_e32 123, implicit $exec, implicit-def %3
+    %1 = V_MOV_B32_e32 456, implicit $exec
+    %2, $vcc = V_ADD_I32_e64 %3.sub0, %1, implicit $exec
     S_ENDPGM
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir b/llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir
index 5612c7c..79111d1 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt-permute.mir
@@ -7,15 +7,15 @@
 
 name:            waitcnt-permute
 liveins:
-  - { reg: '%vgpr0' }
-  - { reg: '%vgpr1' }
-  - { reg: '%sgpr30_sgpr31' }
+  - { reg: '$vgpr0' }
+  - { reg: '$vgpr1' }
+  - { reg: '$sgpr30_sgpr31' }
 body:             |
   bb.0:
-    liveins: %vgpr0, %vgpr1, %sgpr30_sgpr31
+    liveins: $vgpr0, $vgpr1, $sgpr30_sgpr31
 
-    %vgpr0 = DS_BPERMUTE_B32 killed %vgpr0, killed %vgpr1, 0, implicit %exec
-    %vgpr0 = V_ADD_F32_e32 1065353216, killed %vgpr0, implicit %exec
-    S_SETPC_B64_return killed %sgpr30_sgpr31, implicit killed %vgpr0
+    $vgpr0 = DS_BPERMUTE_B32 killed $vgpr0, killed $vgpr1, 0, implicit $exec
+    $vgpr0 = V_ADD_F32_e32 1065353216, killed $vgpr0, implicit $exec
+    S_SETPC_B64_return killed $sgpr30_sgpr31, implicit killed $vgpr0
 
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/waitcnt.mir b/llvm/test/CodeGen/AMDGPU/waitcnt.mir
index 38662e8..0ffd543 100644
--- a/llvm/test/CodeGen/AMDGPU/waitcnt.mir
+++ b/llvm/test/CodeGen/AMDGPU/waitcnt.mir
@@ -51,22 +51,22 @@
 body: |
   bb.0:
     successors: %bb.1
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.global4)
-    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
-    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.global4)
+    $vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 16 from %ir.global16)
+    $vgpr0 = V_MOV_B32_e32 $vgpr1, implicit $exec
     S_BRANCH %bb.1
 
   bb.1:
     successors: %bb.2
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
-    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.global16)
-    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr
+    $vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 16 from %ir.global16)
+    $vgpr0 = V_MOV_B32_e32 $vgpr1, implicit $exec
     S_BRANCH %bb.2
 
   bb.2:
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 4 from %ir.flat4)
-    %vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 %vgpr7_vgpr8, 0, 0, 0, implicit %exec, implicit %flat_scr :: (load 16 from %ir.flat16)
-    %vgpr0 = V_MOV_B32_e32 %vgpr1, implicit %exec
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 4 from %ir.flat4)
+    $vgpr3_vgpr4_vgpr5_vgpr6 = FLAT_LOAD_DWORDX4 $vgpr7_vgpr8, 0, 0, 0, implicit $exec, implicit $flat_scr :: (load 16 from %ir.flat16)
+    $vgpr0 = V_MOV_B32_e32 $vgpr1, implicit $exec
     S_ENDPGM
 ...
 ---
@@ -74,7 +74,7 @@
 # need to wait immediately.
 
 # CHECK-LABEL: name: single_fallthrough_successor_no_end_block_wait
-# CHECK:   %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2
+# CHECK:   $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2
 # CHECK-NOT: S_WAITCNT
 
 # CHECK: bb.1:
@@ -86,11 +86,11 @@
 body: |
   bb.0:
     successors: %bb.1
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr
 
   bb.1:
-    %vgpr3_vgpr4 = V_LSHLREV_B64 4, %vgpr7_vgpr8, implicit %exec
-    FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr3_vgpr4 = V_LSHLREV_B64 4, $vgpr7_vgpr8, implicit $exec
+    FLAT_STORE_DWORD $vgpr3_vgpr4, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
 ---
@@ -99,7 +99,7 @@
 
 
 # CHECK-LABEL: name: single_branch_successor_not_next_block
-# CHECK:   %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2
+# CHECK:   $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2
 # CHECK-NEXT: S_WAITCNT 112
 
 # CHECK: bb.1
@@ -114,15 +114,15 @@
 body: |
   bb.0:
     successors: %bb.2
-    %vgpr0 = FLAT_LOAD_DWORD %vgpr1_vgpr2, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr0 = FLAT_LOAD_DWORD $vgpr1_vgpr2, 0, 0, 0, implicit $exec, implicit $flat_scr
    S_BRANCH %bb.2
 
   bb.1:
-    FLAT_STORE_DWORD %vgpr8_vgpr9, %vgpr10, 0, 0, 0, implicit %exec, implicit %flat_scr
+    FLAT_STORE_DWORD $vgpr8_vgpr9, $vgpr10, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 
   bb.2:
-     %vgpr3_vgpr4 = V_LSHLREV_B64 4, %vgpr7_vgpr8, implicit %exec
-    FLAT_STORE_DWORD %vgpr3_vgpr4, %vgpr0, 0, 0, 0, implicit %exec, implicit %flat_scr
+    $vgpr3_vgpr4 = V_LSHLREV_B64 4, $vgpr7_vgpr8, implicit $exec
+    FLAT_STORE_DWORD $vgpr3_vgpr4, $vgpr0, 0, 0, 0, implicit $exec, implicit $flat_scr
     S_ENDPGM
 ...
diff --git a/llvm/test/CodeGen/AMDGPU/wqm.mir b/llvm/test/CodeGen/AMDGPU/wqm.mir
index 637a003..724c3a9 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm.mir
+++ b/llvm/test/CodeGen/AMDGPU/wqm.mir
@@ -28,23 +28,23 @@
   - { id: 11, class: vgpr_32, preferred-register: '' }
   - { id: 12, class: vgpr_32, preferred-register: '' }
 liveins:         
-  - { reg: '%sgpr0', virtual-reg: '%0' }
-  - { reg: '%sgpr1', virtual-reg: '%1' }
-  - { reg: '%sgpr2', virtual-reg: '%2' }
-  - { reg: '%vgpr0', virtual-reg: '%3' }
+  - { reg: '$sgpr0', virtual-reg: '%0' }
+  - { reg: '$sgpr1', virtual-reg: '%1' }
+  - { reg: '$sgpr2', virtual-reg: '%2' }
+  - { reg: '$vgpr0', virtual-reg: '%3' }
 body:             |
   bb.0:
-    liveins: %sgpr0, %sgpr1, %sgpr2, %vgpr0
+    liveins: $sgpr0, $sgpr1, $sgpr2, $vgpr0
   
-    %3 = COPY %vgpr0
-    %2 = COPY %sgpr2
-    %1 = COPY %sgpr1
-    %0 = COPY %sgpr0
-    S_CMP_LT_I32 0, %0, implicit-def %scc
-    %12 = V_ADD_I32_e32 %3, %3, implicit-def %vcc, implicit %exec
-    %5 = S_CSELECT_B32 %2, %1, implicit %scc
-    %11 = V_ADD_I32_e32 %5, %12, implicit-def %vcc, implicit %exec
-    %vgpr0 = WWM %11, implicit %exec
-    SI_RETURN_TO_EPILOG %vgpr0
+    %3 = COPY $vgpr0
+    %2 = COPY $sgpr2
+    %1 = COPY $sgpr1
+    %0 = COPY $sgpr0
+    S_CMP_LT_I32 0, %0, implicit-def $scc
+    %12 = V_ADD_I32_e32 %3, %3, implicit-def $vcc, implicit $exec
+    %5 = S_CSELECT_B32 %2, %1, implicit $scc
+    %11 = V_ADD_I32_e32 %5, %12, implicit-def $vcc, implicit $exec
+    $vgpr0 = WWM %11, implicit $exec
+    SI_RETURN_TO_EPILOG $vgpr0
 
 ...