Follow-up to the proposal to move the MIR physical register namespace to the '$' sigil.
Discussed here:
http://lists.llvm.org/pipermail/llvm-dev/2018-January/120320.html
In preparation for adding support for named vregs, we are changing the sigil for
physical registers in MIR from '%' to '$'. This prevents name clashes between
named physical registers and named vregs.
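
For illustration, taken from the test update below, a physical-register instruction that
was previously written as

  %sgpr0_sgpr1 = COPY %exec

is now written as

  $sgpr0_sgpr1 = COPY $exec

while virtual registers and basic block references (e.g. %bb.2) keep the '%' sigil.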
llvm-svn: 323922
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
index 4ed8360..2c42f00 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ b/llvm/test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -147,8 +147,8 @@
...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
+# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
# CHECK-NEXT: SI_MASK_BRANCH
name: optimize_if_and_saveexec_xor
@@ -159,7 +159,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -176,37 +176,37 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
- %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
- %exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+ $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+ $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1
+ liveins: $sgpr0_sgpr1
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
---
# CHECK-LABEL: name: optimize_if_and_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
# CHECK-NEXT: SI_MASK_BRANCH
name: optimize_if_and_saveexec
@@ -217,7 +217,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -234,36 +234,36 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
- %exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+ $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1
+ liveins: $sgpr0_sgpr1
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
---
# CHECK-LABEL: name: optimize_if_or_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_OR_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
# CHECK-NEXT: SI_MASK_BRANCH
name: optimize_if_or_saveexec
@@ -274,7 +274,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -291,39 +291,39 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_OR_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
- %exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_OR_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+ $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1
+ liveins: $sgpr0_sgpr1
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor_valu_middle
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`, addrspace 1)
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
# CHECK-NEXT: SI_MASK_BRANCH
name: optimize_if_and_saveexec_xor_valu_middle
alignment: 0
@@ -333,7 +333,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -350,41 +350,41 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
- BUFFER_STORE_DWORD_OFFSET %vgpr0, undef %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
- %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
- %exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+ BUFFER_STORE_DWORD_OFFSET $vgpr0, undef $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+ $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1
+ liveins: $sgpr0_sgpr1
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor_wrong_reg{{$}}
-# CHECK: %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY %sgpr0_sgpr1
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY $sgpr0_sgpr1
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
name: optimize_if_and_saveexec_xor_wrong_reg
alignment: 0
exposesReturnsTwice: false
@@ -393,7 +393,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -410,40 +410,40 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr6 = S_MOV_B32 -1
- %sgpr7 = S_MOV_B32 61440
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr0_sgpr1 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
- %sgpr0_sgpr1 = S_XOR_B64 undef %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
- %exec = S_MOV_B64_term %sgpr0_sgpr1
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr6 = S_MOV_B32 -1
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr0_sgpr1 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+ $sgpr0_sgpr1 = S_XOR_B64 undef $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+ $exec = S_MOV_B64_term $sgpr0_sgpr1
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1 , %sgpr4_sgpr5_sgpr6_sgpr7
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ liveins: $sgpr0_sgpr1 , $sgpr4_sgpr5_sgpr6_sgpr7
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1, %sgpr4_sgpr5_sgpr6_sgpr7
+ liveins: $vgpr0, $sgpr0_sgpr1, $sgpr4_sgpr5_sgpr6_sgpr7
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor_modify_copy_to_exec{{$}}
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
name: optimize_if_and_saveexec_xor_modify_copy_to_exec
alignment: 0
@@ -453,7 +453,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -470,42 +470,42 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
- %sgpr2_sgpr3 = S_OR_B64 killed %sgpr2_sgpr3, 1, implicit-def %scc
- %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
- %exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+ $sgpr2_sgpr3 = S_OR_B64 killed $sgpr2_sgpr3, 1, implicit-def $scc
+ $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+ $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1
+ liveins: $sgpr0_sgpr1
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr0 = S_MOV_B32 0
- %sgpr1 = S_MOV_B32 1
- %sgpr2 = S_MOV_B32 -1
- %sgpr3 = S_MOV_B32 61440
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr0 = S_MOV_B32 0
+ $sgpr1 = S_MOV_B32 1
+ $sgpr2 = S_MOV_B32 -1
+ $sgpr3 = S_MOV_B32 61440
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
---
# CHECK-LABEL: name: optimize_if_and_saveexec_xor_live_out_setexec{{$}}
-# CHECK: %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
-# CHECK-NEXT: %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY %sgpr2_sgpr3
+# CHECK: $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+# CHECK-NEXT: $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY $sgpr2_sgpr3
# CHECK-NEXT: SI_MASK_BRANCH
name: optimize_if_and_saveexec_xor_live_out_setexec
alignment: 0
@@ -515,7 +515,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -532,40 +532,40 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_AND_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
- %sgpr0_sgpr1 = S_XOR_B64 %sgpr2_sgpr3, killed %sgpr0_sgpr1, implicit-def %scc
- %exec = S_MOV_B64_term %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+ $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+ $exec = S_MOV_B64_term $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1, %sgpr2_sgpr3
- S_SLEEP 0, implicit %sgpr2_sgpr3
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ liveins: $sgpr0_sgpr1, $sgpr2_sgpr3
+ S_SLEEP 0, implicit $sgpr2_sgpr3
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
# CHECK-LABEL: name: optimize_if_unknown_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = COPY %exec
-# CHECK: %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr0_sgpr1 = COPY $exec
+# CHECK: $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
name: optimize_if_unknown_saveexec
alignment: 0
@@ -575,7 +575,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -592,36 +592,36 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_LSHR_B64 %sgpr0_sgpr1, killed %vcc_lo, implicit-def %scc
- %exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_LSHR_B64 $sgpr0_sgpr1, killed $vcc_lo, implicit-def $scc
+ $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1
+ liveins: $sgpr0_sgpr1
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
---
# CHECK-LABEL: name: optimize_if_andn2_saveexec{{$}}
-# CHECK: %sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 %vcc, implicit-def %exec, implicit-def %scc, implicit %exec
+# CHECK: $sgpr0_sgpr1 = S_ANDN2_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
# CHECK-NEXT: SI_MASK_BRANCH
name: optimize_if_andn2_saveexec
@@ -632,7 +632,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -649,38 +649,38 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_ANDN2_B64 %sgpr0_sgpr1, killed %vcc, implicit-def %scc
- %exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_ANDN2_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+ $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1
+ liveins: $sgpr0_sgpr1
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...
---
# CHECK-LABEL: name: optimize_if_andn2_saveexec_no_commute{{$}}
-# CHECK: %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
-# CHECK-NEXT: %exec = COPY killed %sgpr2_sgpr3
-# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit %exec
+# CHECK: $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc
+# CHECK-NEXT: $exec = COPY killed $sgpr2_sgpr3
+# CHECK-NEXT: SI_MASK_BRANCH %bb.2, implicit $exec
name: optimize_if_andn2_saveexec_no_commute
alignment: 0
exposesReturnsTwice: false
@@ -689,7 +689,7 @@
selected: false
tracksRegLiveness: true
liveins:
- - { reg: '%vgpr0' }
+ - { reg: '$vgpr0' }
frameInfo:
isFrameAddressTaken: false
isReturnAddressTaken: false
@@ -706,30 +706,30 @@
hasMustTailInVarArgFunc: false
body: |
bb.0.main_body:
- liveins: %vgpr0
+ liveins: $vgpr0
- %sgpr0_sgpr1 = COPY %exec
- %vcc = V_CMP_EQ_I32_e64 0, killed %vgpr0, implicit %exec
- %vgpr0 = V_MOV_B32_e32 4, implicit %exec
- %sgpr2_sgpr3 = S_ANDN2_B64 killed %vcc, %sgpr0_sgpr1, implicit-def %scc
- %exec = S_MOV_B64_term killed %sgpr2_sgpr3
- SI_MASK_BRANCH %bb.2, implicit %exec
+ $sgpr0_sgpr1 = COPY $exec
+ $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+ $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+ $sgpr2_sgpr3 = S_ANDN2_B64 killed $vcc, $sgpr0_sgpr1, implicit-def $scc
+ $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+ SI_MASK_BRANCH %bb.2, implicit $exec
S_BRANCH %bb.1
bb.1.if:
- liveins: %sgpr0_sgpr1
+ liveins: $sgpr0_sgpr1
- %sgpr7 = S_MOV_B32 61440
- %sgpr6 = S_MOV_B32 -1
- %vgpr0 = BUFFER_LOAD_DWORD_OFFSET %sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit %exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+ $sgpr7 = S_MOV_B32 61440
+ $sgpr6 = S_MOV_B32 -1
+ $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
bb.2.end:
- liveins: %vgpr0, %sgpr0_sgpr1
+ liveins: $vgpr0, $sgpr0_sgpr1
- %exec = S_OR_B64 %exec, killed %sgpr0_sgpr1, implicit-def %scc
- %sgpr3 = S_MOV_B32 61440
- %sgpr2 = S_MOV_B32 -1
- BUFFER_STORE_DWORD_OFFSET killed %vgpr0, %sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit %exec :: (store 4 into `i32 addrspace(1)* undef`)
+ $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+ $sgpr3 = S_MOV_B32 61440
+ $sgpr2 = S_MOV_B32 -1
+ BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
S_ENDPGM
...