Krzysztof Parzyszek | 70f0270 | 2018-06-26 14:37:16 +0000 | [diff] [blame] | 1 | # RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-vgpr-index-mode -run-pass=greedy -stress-regalloc=16 -o - %s | FileCheck -check-prefixes=GCN %s |
| 2 | |
| 3 | # An interval for a register that was partially defined was split, creating |
| 4 | # a new use (a COPY) which was reached by the undef point. In particular, |
| 5 | # there was a subrange of the new register which was reached by an "undef" |
| 6 | # point. When the code in extendSegmentsToUses verified value numbers between |
| 7 | # the new and the old live ranges, it did not account for this kind of a |
| 8 | # situation and asserted expecting the old value to exist. For a PHI node |
| 9 | # it is legal to have a missing predecessor value as long as the end of |
| 10 | # the predecessor is jointly dominated by the undefs. |
| 11 | # |
| 12 | # A simplified form of this can be illustrated as |
| 13 | # |
| 14 | # bb.1: |
| 15 | # %0:vreg_64 = IMPLICIT_DEF |
| 16 | # ... |
| 17 | # S_CBRANCH_SCC1 %bb.2, implicit $vcc |
| 18 | # S_BRANCH %bb.3 |
| 19 | # |
| 20 | # bb.2: |
| 21 | # ; predecessors: %bb.1, %bb.4 |
| 22 | # dead %1:vreg_64 = COPY %0:vreg_64 ; This is the point of the inserted split |
| 23 | # ... |
| 24 | # S_BRANCH %bb.5 |
| 25 | # |
| 26 | # bb.3: |
| 27 | # ; predecessors: %bb.1 |
| 28 | # undef %0.sub0:vreg_64 = COPY %123:sreg_32 ; undef point for %0.sub1 |
| 29 | # ... |
| 30 | # S_BRANCH %bb.4 |
| 31 | # |
# bb.4:
#  ; predecessors: %bb.3
| 34 | # ... |
| 35 | # S_BRANCH %bb.2 |
| 36 | # |
# This test exposes this scenario, which previously caused an assert.
| 38 | |
| 39 | --- |
| 40 | name: _amdgpu_ps_main |
| 41 | tracksRegLiveness: true |
| 42 | liveins: |
| 43 | - { reg: '$vgpr2', virtual-reg: '%0' } |
| 44 | - { reg: '$vgpr3', virtual-reg: '%1' } |
| 45 | - { reg: '$vgpr4', virtual-reg: '%2' } |
| 46 | body: | |
| 47 | bb.0: |
| 48 | successors: %bb.1(0x40000000), %bb.2(0x40000000) |
| 49 | liveins: $vgpr2, $vgpr3, $vgpr4 |
| 50 | %2:vgpr_32 = COPY $vgpr4 |
| 51 | %1:vgpr_32 = COPY $vgpr3 |
| 52 | %0:vgpr_32 = COPY $vgpr2 |
| 53 | S_CBRANCH_SCC0 %bb.2, implicit undef $scc |
| 54 | |
| 55 | bb.1: |
| 56 | successors: %bb.5(0x80000000) |
| 57 | undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec |
| 58 | %3.sub1:vreg_128 = COPY %3.sub0 |
| 59 | %3.sub2:vreg_128 = COPY %3.sub0 |
| 60 | S_BRANCH %bb.5 |
| 61 | |
| 62 | bb.2: |
| 63 | successors: %bb.3(0x40000000), %bb.4(0x40000000) |
| 64 | S_CBRANCH_SCC0 %bb.4, implicit undef $scc |
| 65 | |
| 66 | bb.3: |
| 67 | successors: %bb.5(0x80000000) |
| 68 | undef %3.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec |
| 69 | %3.sub1:vreg_128 = COPY %3.sub0 |
| 70 | S_BRANCH %bb.5 |
| 71 | |
| 72 | bb.4: |
| 73 | successors: %bb.5(0x80000000) |
| 74 | %3:vreg_128 = IMPLICIT_DEF |
| 75 | |
| 76 | bb.5: |
| 77 | successors: %bb.6(0x40000000), %bb.22(0x40000000) |
| 78 | %4:vgpr_32 = V_MOV_B32_e32 0, implicit $exec |
| 79 | S_CBRANCH_SCC1 %bb.22, implicit undef $scc |
| 80 | S_BRANCH %bb.6 |
| 81 | |
| 82 | bb.6: |
| 83 | successors: %bb.8(0x40000000), %bb.11(0x40000000) |
| 84 | %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec |
| 85 | dead %6:vgpr_32 = V_MUL_F32_e32 0, undef %7:vgpr_32, implicit $exec |
| 86 | dead %8:vgpr_32 = V_MUL_F32_e32 0, %2, implicit $exec |
| 87 | undef %9.sub1:vreg_64 = V_MUL_F32_e32 0, %1, implicit $exec |
| 88 | undef %10.sub0:vreg_128 = V_MUL_F32_e32 0, %0, implicit $exec |
| 89 | undef %11.sub0:sreg_256 = S_MOV_B32 0 |
| 90 | %11.sub1:sreg_256 = COPY %11.sub0 |
| 91 | %11.sub2:sreg_256 = COPY %11.sub0 |
| 92 | %11.sub3:sreg_256 = COPY %11.sub0 |
| 93 | %11.sub4:sreg_256 = COPY %11.sub0 |
| 94 | %11.sub5:sreg_256 = COPY %11.sub0 |
| 95 | %11.sub6:sreg_256 = COPY %11.sub0 |
| 96 | %11.sub7:sreg_256 = COPY %11.sub0 |
| 97 | %12:vreg_128 = IMAGE_SAMPLE_LZ_V4_V2 %9, %11, undef %13:sreg_128, 15, 0, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4) |
| 98 | %14:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec |
| 99 | %15:vreg_128 = IMPLICIT_DEF |
| 100 | S_CBRANCH_SCC1 %bb.8, implicit undef $scc |
| 101 | S_BRANCH %bb.11 |
| 102 | |
| 103 | bb.7: |
| 104 | successors: %bb.13(0x80000000) |
John Brawn | 1d0d86a | 2018-12-14 14:07:57 +0000 | [diff] [blame] | 105 | |
| 106 | ; In reality we are checking that this code doesn't assert when splitting |
| 107 | ; and inserting a spill. Here we just check that the point where the error |
| 108 | ; occurs we see a correctly generated spill. |
| 109 | ; GCN-LABEL: bb.7: |
| 110 | ; GCN: SI_SPILL_V128_SAVE %{{[0-9]+}}, %stack.1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec |
| 111 | |
Krzysztof Parzyszek | 70f0270 | 2018-06-26 14:37:16 +0000 | [diff] [blame] | 112 | undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec |
| 113 | %15.sub1:vreg_128 = COPY %15.sub0 |
| 114 | %15.sub2:vreg_128 = COPY %15.sub0 |
| 115 | %5:vgpr_32 = IMPLICIT_DEF |
| 116 | S_BRANCH %bb.13 |
| 117 | |
| 118 | bb.8: |
| 119 | successors: %bb.9(0x40000000), %bb.10(0x40000000) |
| 120 | S_CBRANCH_SCC0 %bb.10, implicit undef $scc |
| 121 | |
| 122 | bb.9: |
| 123 | successors: %bb.12(0x80000000) |
John Brawn | 1d0d86a | 2018-12-14 14:07:57 +0000 | [diff] [blame] | 124 | |
| 125 | ; GCN-LABEL: bb.9: |
| 126 | ; GCN: SI_SPILL_V128_SAVE %{{[0-9]+}}, %stack.1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec |
| 127 | |
Krzysztof Parzyszek | 70f0270 | 2018-06-26 14:37:16 +0000 | [diff] [blame] | 128 | undef %15.sub0:vreg_128 = V_MOV_B32_e32 0, implicit $exec |
| 129 | %15.sub1:vreg_128 = COPY %15.sub0 |
| 130 | %15.sub2:vreg_128 = COPY %15.sub0 |
| 131 | S_BRANCH %bb.12 |
| 132 | |
| 133 | bb.10: |
| 134 | successors: %bb.12(0x80000000) |
John Brawn | 1d0d86a | 2018-12-14 14:07:57 +0000 | [diff] [blame] | 135 | |
| 136 | ; GCN-LABEL: bb.10: |
| 137 | ; GCN: SI_SPILL_V128_SAVE %{{[0-9]+}}, %stack.1, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr5, 0, implicit $exec |
| 138 | |
Krzysztof Parzyszek | 70f0270 | 2018-06-26 14:37:16 +0000 | [diff] [blame] | 139 | undef %15.sub0:vreg_128 = V_MOV_B32_e32 2143289344, implicit $exec |
| 140 | %15.sub1:vreg_128 = COPY %15.sub0 |
| 141 | %15.sub2:vreg_128 = COPY %15.sub0 |
| 142 | S_BRANCH %bb.12 |
| 143 | |
| 144 | bb.11: |
| 145 | successors: %bb.7(0x40000000), %bb.13(0x40000000) |
| 146 | %16:sreg_64 = V_CMP_NE_U32_e64 0, %14, implicit $exec |
| 147 | %17:sreg_64 = S_AND_B64 $exec, %16, implicit-def dead $scc |
| 148 | $vcc = COPY %17 |
| 149 | S_CBRANCH_VCCNZ %bb.7, implicit $vcc |
| 150 | S_BRANCH %bb.13 |
| 151 | |
| 152 | bb.12: |
| 153 | successors: %bb.11(0x80000000) |
| 154 | %14:vgpr_32 = V_MOV_B32_e32 0, implicit $exec |
| 155 | %5:vgpr_32 = V_MOV_B32_e32 -1, implicit $exec |
| 156 | S_BRANCH %bb.11 |
| 157 | |
| 158 | bb.13: |
| 159 | successors: %bb.15(0x40000000), %bb.14(0x40000000) |
| 160 | |
Krzysztof Parzyszek | 70f0270 | 2018-06-26 14:37:16 +0000 | [diff] [blame] | 161 | %18:vgpr_32 = V_MAD_F32 0, %10.sub0, 0, target-flags(amdgpu-gotprel) 1073741824, 0, -1082130432, 0, 0, implicit $exec |
| 162 | %19:vgpr_32 = V_MAD_F32 0, %12.sub0, 0, target-flags(amdgpu-gotprel) 0, 0, 0, 0, 0, implicit $exec |
| 163 | %20:sreg_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %21:sreg_128, 1040, 0 :: (dereferenceable invariant load 16) |
| 164 | %22:vgpr_32 = V_ADD_F32_e32 0, %19, implicit $exec |
| 165 | %23:vgpr_32 = V_MAD_F32 0, %18, 0, 0, 0, 0, 0, 0, implicit $exec |
| 166 | %24:vgpr_32 = COPY %20.sub3 |
| 167 | %25:vgpr_32 = V_MUL_F32_e64 0, target-flags(amdgpu-gotprel32-lo) 0, 0, %20.sub1, 0, 0, implicit $exec |
| 168 | %26:sreg_128 = S_BUFFER_LOAD_DWORDX4_IMM undef %27:sreg_128, 1056, 0 :: (dereferenceable invariant load 16) |
| 169 | %28:vgpr_32 = V_MAD_F32 0, %18, 0, %26.sub0, 0, 0, 0, 0, implicit $exec |
| 170 | %29:vgpr_32 = V_ADD_F32_e32 %28, %19, implicit $exec |
| 171 | %30:vgpr_32 = V_RCP_F32_e32 %29, implicit $exec |
| 172 | %25:vgpr_32 = V_MAC_F32_e32 0, %18, %25, implicit $exec |
| 173 | %31:vgpr_32 = V_MAD_F32 0, target-flags(amdgpu-gotprel) 0, 0, %12.sub0, 0, %24, 0, 0, implicit $exec |
| 174 | %32:vgpr_32 = V_ADD_F32_e32 %25, %31, implicit $exec |
| 175 | %33:vgpr_32 = V_MUL_F32_e32 %22, %30, implicit $exec |
| 176 | %34:vgpr_32 = V_MUL_F32_e32 %23, %30, implicit $exec |
| 177 | %35:vgpr_32 = V_MUL_F32_e32 %32, %30, implicit $exec |
| 178 | %36:vgpr_32 = V_MUL_F32_e32 0, %34, implicit $exec |
| 179 | %36:vgpr_32 = V_MAC_F32_e32 0, %33, %36, implicit $exec |
| 180 | %37:vgpr_32 = V_MAD_F32 0, %35, 0, 0, 0, 0, 0, 0, implicit $exec |
| 181 | %38:sreg_64_xexec = V_CMP_NE_U32_e64 0, %5, implicit $exec |
| 182 | %39:vgpr_32 = V_CNDMASK_B32_e64 0, 1, %38, implicit $exec |
| 183 | V_CMP_NE_U32_e32 1, %39, implicit-def $vcc, implicit $exec |
| 184 | $vcc = S_AND_B64 $exec, $vcc, implicit-def dead $scc |
| 185 | %40:vgpr_32 = V_ADD_F32_e32 %36, %37, implicit $exec |
| 186 | S_CBRANCH_VCCZ %bb.15, implicit $vcc |
| 187 | |
| 188 | bb.14: |
| 189 | successors: %bb.17(0x80000000) |
| 190 | S_BRANCH %bb.17 |
| 191 | |
| 192 | bb.15: |
| 193 | successors: %bb.16(0x40000000), %bb.18(0x40000000) |
| 194 | %41:vgpr_32 = V_MAD_F32 0, %40, 0, 0, 0, 0, 0, 0, implicit $exec |
| 195 | %42:sreg_64 = V_CMP_LE_F32_e64 0, 0, 0, %41, 0, implicit $exec |
| 196 | %43:sreg_64 = V_CMP_GE_F32_e64 0, 1065353216, 0, %41, 0, implicit $exec |
| 197 | %44:sreg_64 = S_AND_B64 %43, %43, implicit-def dead $scc |
| 198 | %45:sreg_64 = S_AND_B64 %42, %42, implicit-def dead $scc |
| 199 | %46:sreg_64 = S_AND_B64 %45, %44, implicit-def dead $scc |
| 200 | %47:sreg_64 = COPY $exec, implicit-def $exec |
| 201 | %48:sreg_64 = S_AND_B64 %47, %46, implicit-def dead $scc |
| 202 | $exec = S_MOV_B64_term %48 |
| 203 | SI_MASK_BRANCH %bb.18, implicit $exec |
| 204 | S_BRANCH %bb.16 |
| 205 | |
| 206 | bb.16: |
| 207 | successors: %bb.18(0x80000000) |
| 208 | S_BRANCH %bb.18 |
| 209 | |
| 210 | bb.17: |
| 211 | successors: %bb.21(0x40000000), %bb.23(0x40000000) |
| 212 | %49:sreg_64 = V_CMP_NE_U32_e64 0, %5, implicit $exec |
| 213 | %50:sreg_64 = S_AND_B64 $exec, %49, implicit-def dead $scc |
| 214 | %51:vreg_128 = IMPLICIT_DEF |
| 215 | $vcc = COPY %50 |
| 216 | S_CBRANCH_VCCNZ %bb.21, implicit $vcc |
| 217 | S_BRANCH %bb.23 |
| 218 | |
| 219 | bb.18: |
| 220 | successors: %bb.20(0x40000000), %bb.19(0x40000000) |
| 221 | $exec = S_OR_B64 $exec, %47, implicit-def $scc |
| 222 | %52:vgpr_32 = V_MAD_F32 0, %3.sub1, 0, target-flags(amdgpu-gotprel32-lo) 0, 1, %3.sub0, 0, 0, implicit $exec |
| 223 | %53:vgpr_32 = V_MUL_F32_e32 -2147483648, %3.sub1, implicit $exec |
| 224 | %53:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel32-hi) 1065353216, %3.sub2, %53, implicit $exec |
| 225 | %54:vgpr_32 = V_MUL_F32_e32 %53, %53, implicit $exec |
| 226 | %54:vgpr_32 = V_MAC_F32_e32 %52, %52, %54, implicit $exec |
| 227 | %55:vgpr_32 = V_SQRT_F32_e32 %54, implicit $exec |
| 228 | %5:vgpr_32 = V_MOV_B32_e32 0, implicit $exec |
| 229 | %56:vgpr_32 = V_MOV_B32_e32 981668463, implicit $exec |
| 230 | %57:sreg_64 = V_CMP_NGT_F32_e64 0, %55, 0, %56, 0, implicit $exec |
| 231 | %58:sreg_64 = S_AND_B64 $exec, %57, implicit-def dead $scc |
| 232 | $vcc = COPY %58 |
| 233 | S_CBRANCH_VCCZ %bb.20, implicit $vcc |
| 234 | |
| 235 | bb.19: |
| 236 | successors: %bb.17(0x80000000) |
| 237 | S_BRANCH %bb.17 |
| 238 | |
| 239 | bb.20: |
| 240 | successors: %bb.17(0x80000000) |
| 241 | S_BRANCH %bb.17 |
| 242 | |
| 243 | bb.21: |
| 244 | successors: %bb.23(0x80000000) |
| 245 | %59:sreg_32 = S_MOV_B32 0 |
| 246 | undef %51.sub0:vreg_128 = COPY %59 |
| 247 | S_BRANCH %bb.23 |
| 248 | |
| 249 | bb.22: |
| 250 | successors: %bb.24(0x80000000) |
| 251 | S_BRANCH %bb.24 |
| 252 | |
| 253 | bb.23: |
| 254 | successors: %bb.22(0x80000000) |
| 255 | undef %60.sub1:vreg_64 = V_CVT_I32_F32_e32 %1, implicit $exec |
| 256 | %60.sub0:vreg_64 = V_CVT_I32_F32_e32 %0, implicit $exec |
| 257 | undef %61.sub0:sreg_256 = S_MOV_B32 0 |
| 258 | %61.sub1:sreg_256 = COPY %61.sub0 |
| 259 | %61.sub2:sreg_256 = COPY %61.sub0 |
| 260 | %61.sub3:sreg_256 = COPY %61.sub0 |
| 261 | %61.sub4:sreg_256 = COPY %61.sub0 |
| 262 | %61.sub5:sreg_256 = COPY %61.sub0 |
| 263 | %61.sub6:sreg_256 = COPY %61.sub0 |
| 264 | %61.sub7:sreg_256 = COPY %61.sub0 |
| 265 | %62:vgpr_32 = V_MOV_B32_e32 1033100696, implicit $exec |
| 266 | %63:vgpr_32 = V_MUL_F32_e32 1060575065, %15.sub1, implicit $exec |
| 267 | %63:vgpr_32 = V_MAC_F32_e32 1046066128, %15.sub0, %63, implicit $exec |
| 268 | %64:vgpr_32 = IMAGE_LOAD_V1_V2 %60, %61, 1, -1, 0, 0, 0, 0, 0, 0, 0, implicit $exec :: (dereferenceable load 16 from constant-pool, addrspace 4) |
| 269 | %64:vgpr_32 = V_MAC_F32_e32 target-flags(amdgpu-gotprel) 0, %51.sub0, %64, implicit $exec |
| 270 | %65:vgpr_32 = V_MUL_F32_e32 0, %64, implicit $exec |
| 271 | %66:vgpr_32 = V_MUL_F32_e32 0, %65, implicit $exec |
| 272 | %67:vgpr_32 = V_MAD_F32 0, %66, 0, %62, 0, 0, 0, 0, implicit $exec |
| 273 | %63:vgpr_32 = V_MAC_F32_e32 %15.sub2, %62, %63, implicit $exec |
| 274 | %4:vgpr_32 = V_ADD_F32_e32 %63, %67, implicit $exec |
| 275 | S_BRANCH %bb.22 |
| 276 | |
| 277 | bb.24: |
| 278 | %68:vgpr_32 = V_MUL_F32_e32 0, %4, implicit $exec |
Matt Arsenault | 709374d | 2018-08-01 20:13:58 +0000 | [diff] [blame] | 279 | %69:vgpr_32 = V_CVT_PKRTZ_F16_F32_e64 0, undef %70:vgpr_32, 0, %68, 0, 0, implicit $exec |
Krzysztof Parzyszek | 70f0270 | 2018-06-26 14:37:16 +0000 | [diff] [blame] | 280 | EXP 0, undef %71:vgpr_32, %69, undef %72:vgpr_32, undef %73:vgpr_32, -1, -1, 15, implicit $exec |
| 281 | S_ENDPGM |
| 282 | ... |