# RUN: llc -march=amdgcn -verify-machineinstrs -run-pass si-load-store-opt -o - %s | FileCheck %s

# Check that SILoadStoreOptimizer honors memory dependencies between moved
# instructions.
#
# The following IR snippet would usually be optimized by the peephole optimizer.
# However, an equivalent situation can occur with buffer instructions as well.

# CHECK-LABEL: name: mem_dependency
# CHECK: DS_READ2_B32 %0, 0, 1,
# CHECK: DS_WRITE_B32 %0, killed %1, 64,
# CHECK: DS_READ2_B32 %0, 16, 17,
# CHECK: DS_WRITE_B32 killed %0, %5, 0
| 15 | --- | |
| 16 | define amdgpu_kernel void @mem_dependency(i32 addrspace(3)* %ptr.0) nounwind { |
| 17 | %ptr.4 = getelementptr i32, i32 addrspace(3)* %ptr.0, i32 1 |
| 18 | %ptr.64 = getelementptr i32, i32 addrspace(3)* %ptr.0, i32 16 |
| 19 | %1 = load i32, i32 addrspace(3)* %ptr.0 |
| 20 | store i32 %1, i32 addrspace(3)* %ptr.64 |
| 21 | %2 = load i32, i32 addrspace(3)* %ptr.64 |
| 22 | %3 = load i32, i32 addrspace(3)* %ptr.4 |
| 23 | %4 = add i32 %2, %3 |
| 24 | store i32 %4, i32 addrspace(3)* %ptr.0 |
| 25 | ret void |
| 26 | } |
| Matt Arsenault | b02cebf | 2018-02-08 01:56:14 +0000 | [diff] [blame] | 27 | |
| 28 | @lds0 = external dso_local unnamed_addr addrspace(3) global [256 x i32], align 4 |
| 29 | @lds1 = external dso_local unnamed_addr addrspace(3) global [256 x i32], align 4 |
| 30 | @lds2 = external dso_local unnamed_addr addrspace(3) global [256 x i32], align 4 |
| 31 | @lds3 = external dso_local unnamed_addr addrspace(3) global [256 x i32], align 4 |
| 32 | |
| 33 | define void @asm_defines_address() #0 { |
| 34 | bb: |
| 35 | %tmp1 = load i32, i32 addrspace(3)* getelementptr inbounds ([256 x i32], [256 x i32] addrspace(3)* @lds0, i32 0, i32 0), align 4 |
| 36 | %0 = and i32 %tmp1, 255 |
| 37 | %tmp3 = load i32, i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds1, i32 0, i32 undef), align 4 |
| 38 | %tmp6 = load i32, i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds3, i32 0, i32 undef), align 4 |
| 39 | %tmp7 = tail call i32 asm "v_or_b32 $0, 0, $1", "=v,v"(i32 %tmp6) #1 |
| 40 | %tmp10 = lshr i32 %tmp7, 16 |
| 41 | %tmp11 = and i32 %tmp10, 255 |
| 42 | %tmp12 = getelementptr inbounds [256 x i32], [256 x i32] addrspace(3)* @lds1, i32 0, i32 %tmp11 |
| 43 | %tmp13 = load i32, i32 addrspace(3)* %tmp12, align 4 |
| 44 | %tmp14 = xor i32 %tmp3, %tmp13 |
| 45 | %tmp15 = lshr i32 %tmp14, 8 |
| 46 | %tmp16 = and i32 %tmp15, 16711680 |
| 47 | %tmp19 = lshr i32 %tmp16, 16 |
| 48 | %tmp20 = and i32 %tmp19, 255 |
| 49 | %tmp21 = getelementptr inbounds [256 x i32], [256 x i32] addrspace(3)* @lds1, i32 0, i32 %tmp20 |
| 50 | %tmp22 = load i32, i32 addrspace(3)* %tmp21, align 4 |
| 51 | %tmp24 = load i32, i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds2, i32 0, i32 undef), align 4 |
| 52 | %tmp25 = xor i32 %tmp22, %tmp24 |
| 53 | %tmp26 = and i32 %tmp25, -16777216 |
| 54 | %tmp28 = or i32 %0, %tmp26 |
| 55 | store volatile i32 %tmp28, i32 addrspace(1)* undef |
| 56 | ret void |
| 57 | } |
| 58 | |
| 59 | attributes #0 = { convergent nounwind } |
| 60 | attributes #1 = { convergent nounwind readnone } |
| 61 | |
| Nicolai Haehnle | dd059c1 | 2017-11-22 12:25:21 +0000 | [diff] [blame] | 62 | ... |
| 63 | --- |
| 64 | name: mem_dependency |
| 65 | alignment: 0 |
| 66 | exposesReturnsTwice: false |
| 67 | legalized: false |
| 68 | regBankSelected: false |
| 69 | selected: false |
| 70 | tracksRegLiveness: true |
| 71 | liveins: |
| Puyan Lotfi | 43e94b1 | 2018-01-31 22:04:26 +0000 | [diff] [blame] | 72 | - { reg: '$vgpr0', virtual-reg: '%1' } |
| Nicolai Haehnle | dd059c1 | 2017-11-22 12:25:21 +0000 | [diff] [blame] | 73 | frameInfo: |
| 74 | isFrameAddressTaken: false |
| 75 | isReturnAddressTaken: false |
| 76 | hasStackMap: false |
| 77 | hasPatchPoint: false |
| 78 | stackSize: 0 |
| 79 | offsetAdjustment: 0 |
| 80 | maxAlignment: 0 |
| 81 | adjustsStack: false |
| 82 | hasCalls: false |
| 83 | maxCallFrameSize: 0 |
| 84 | hasOpaqueSPAdjustment: false |
| 85 | hasVAStart: false |
| 86 | hasMustTailInVarArgFunc: false |
| 87 | body: | |
| 88 | bb.0: |
| Puyan Lotfi | 43e94b1 | 2018-01-31 22:04:26 +0000 | [diff] [blame] | 89 | liveins: $vgpr0 |
| Nicolai Haehnle | dd059c1 | 2017-11-22 12:25:21 +0000 | [diff] [blame] | 90 | |
| Puyan Lotfi | 43e94b1 | 2018-01-31 22:04:26 +0000 | [diff] [blame] | 91 | %1:vgpr_32 = COPY $vgpr0 |
| 92 | $m0 = S_MOV_B32 -1 |
| 93 | %2:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit $m0, implicit $exec :: (load 4 from %ir.ptr.0) |
| 94 | DS_WRITE_B32 %1, killed %2, 64, 0, implicit $m0, implicit $exec :: (store 4 into %ir.ptr.64) |
| Nicolai Haehnle | dd059c1 | 2017-11-22 12:25:21 +0000 | [diff] [blame] | 95 | |
| 96 | ; Make this load unmergeable, to tempt SILoadStoreOptimizer into merging the |
| 97 | ; other two loads. |
| Puyan Lotfi | 43e94b1 | 2018-01-31 22:04:26 +0000 | [diff] [blame] | 98 | %6:vreg_64 = DS_READ2_B32 %1, 16, 17, 0, implicit $m0, implicit $exec :: (load 8 from %ir.ptr.64, align 4) |
| Nicolai Haehnle | dd059c1 | 2017-11-22 12:25:21 +0000 | [diff] [blame] | 99 | %3:vgpr_32 = COPY %6.sub0 |
| Puyan Lotfi | 43e94b1 | 2018-01-31 22:04:26 +0000 | [diff] [blame] | 100 | %4:vgpr_32 = DS_READ_B32 %1, 4, 0, implicit $m0, implicit $exec :: (load 4 from %ir.ptr.4) |
| 101 | %5:vgpr_32 = V_ADD_I32_e32 killed %3, killed %4, implicit-def $vcc, implicit $exec |
| 102 | DS_WRITE_B32 killed %1, %5, 0, 0, implicit killed $m0, implicit $exec :: (store 4 into %ir.ptr.0) |
| Nicolai Haehnle | dd059c1 | 2017-11-22 12:25:21 +0000 | [diff] [blame] | 103 | S_ENDPGM |
| 104 | |
| 105 | ... |
| Matt Arsenault | b02cebf | 2018-02-08 01:56:14 +0000 | [diff] [blame] | 106 | --- |
| 107 | # Make sure the asm def isn't moved after the point where it's used for |
| 108 | # the address. |
| 109 | # CHECK-LABEL: name: asm_defines_address |
| 110 | # CHECK: DS_READ2ST64_B32 |
| 111 | # CHECK: DS_READ2ST64_B32 |
| 112 | # CHECK: INLINEASM |
| 113 | # CHECK: DS_READ_B32 |
| 114 | # CHECK: DS_READ_B32 |
| 115 | name: asm_defines_address |
| 116 | tracksRegLiveness: true |
| 117 | registers: |
| 118 | - { id: 0, class: vgpr_32, preferred-register: '' } |
| 119 | body: | |
| 120 | bb.0: |
| 121 | %1:vgpr_32 = V_MOV_B32_e32 0, implicit $exec |
| 122 | %2:vgpr_32 = DS_READ_B32 %1, 3072, 0, implicit $m0, implicit $exec :: (dereferenceable load 4 from `i32 addrspace(3)* getelementptr inbounds ([256 x i32], [256 x i32] addrspace(3)* @lds0, i32 0, i32 0)`, addrspace 3) |
| 123 | %3:vgpr_32 = DS_READ_B32 %1, 2048, 0, implicit $m0, implicit $exec :: (load 4 from `i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds1, i32 0, i32 undef)`, addrspace 3) |
| 124 | %4:vgpr_32 = DS_READ_B32 %1, 1024, 0, implicit $m0, implicit $exec :: (load 4 from `i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds3, i32 0, i32 undef)`, addrspace 3) |
| 125 | INLINEASM &"v_or_b32 $0, 0, $1", 32, 327690, def %0, 327689, %4 |
| 126 | %5:vgpr_32 = DS_READ_B32 %0, 2048, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp12, addrspace 3) |
| 127 | %6:vgpr_32 = DS_READ_B32 %5, 2048, 0, implicit $m0, implicit $exec :: (load 4 from %ir.tmp21, addrspace 3) |
| 128 | %7:vgpr_32 = DS_READ_B32 %1, 0, 0, implicit $m0, implicit $exec :: (load 4 from `i32 addrspace(3)* getelementptr ([256 x i32], [256 x i32] addrspace(3)* @lds2, i32 0, i32 undef)`, addrspace 3) |
| 129 | S_SETPC_B64_return undef $sgpr30_sgpr31, implicit %6, implicit %7 |
| 130 | |
| 131 | ... |