; RUN: llc -O0 -march=r600 -mcpu=bonaire -mattr=-promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-NO-PROMOTE %s
; RUN: llc -O0 -march=r600 -mcpu=bonaire -mattr=+promote-alloca < %s | FileCheck -check-prefix=CHECK -check-prefix=CHECK-PROMOTE %s

; Disable optimizations in case there are optimizations added that
; specialize away generic pointer accesses.


; CHECK-LABEL: @branch_use_flat_i32:
; CHECK: FLAT_STORE_DWORD {{v[0-9]+}}, {{v\[[0-9]+:[0-9]+\]}}, [M0, FLAT_SCRATCH]
; CHECK: S_ENDPGM
define void @branch_use_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* %gptr, i32 addrspace(3)* %lptr, i32 %x, i32 %c) #0 {
entry:
  %cmp = icmp ne i32 %c, 0
  br i1 %cmp, label %local, label %global

local:
  %flat_local = addrspacecast i32 addrspace(3)* %lptr to i32 addrspace(4)*
  br label %end

global:
  %flat_global = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  br label %end

end:
  %fptr = phi i32 addrspace(4)* [ %flat_local, %local ], [ %flat_global, %global ]
  store i32 %x, i32 addrspace(4)* %fptr, align 4
;  %val = load i32 addrspace(4)* %fptr, align 4
;  store i32 %val, i32 addrspace(1)* %out, align 4
  ret void
}



; These testcases might become useless when there are optimizations to
; remove generic pointers.

; CHECK-LABEL: @store_flat_i32:
; CHECK: V_MOV_B32_e32 v[[DATA:[0-9]+]], {{s[0-9]+}}
; CHECK: V_MOV_B32_e32 v[[LO_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: V_MOV_B32_e32 v[[HI_VREG:[0-9]+]], {{s[0-9]+}}
; CHECK: FLAT_STORE_DWORD v[[DATA]], v{{\[}}[[LO_VREG]]:[[HI_VREG]]{{\]}}
define void @store_flat_i32(i32 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  store i32 %x, i32 addrspace(4)* %fptr, align 4
  ret void
}

; CHECK-LABEL: @store_flat_i64:
; CHECK: FLAT_STORE_DWORDX2
define void @store_flat_i64(i64 addrspace(1)* %gptr, i64 %x) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  store i64 %x, i64 addrspace(4)* %fptr, align 8
  ret void
}

; CHECK-LABEL: @store_flat_v4i32:
; CHECK: FLAT_STORE_DWORDX4
define void @store_flat_v4i32(<4 x i32> addrspace(1)* %gptr, <4 x i32> %x) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  store <4 x i32> %x, <4 x i32> addrspace(4)* %fptr, align 16
  ret void
}

; CHECK-LABEL: @store_flat_trunc_i16:
; CHECK: FLAT_STORE_SHORT
define void @store_flat_trunc_i16(i16 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %y = trunc i32 %x to i16
  store i16 %y, i16 addrspace(4)* %fptr, align 2
  ret void
}

; CHECK-LABEL: @store_flat_trunc_i8:
; CHECK: FLAT_STORE_BYTE
define void @store_flat_trunc_i8(i8 addrspace(1)* %gptr, i32 %x) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %y = trunc i32 %x to i8
  store i8 %y, i8 addrspace(4)* %fptr, align 2
  ret void
}


; CHECK-LABEL: @load_flat_i32:
; CHECK: FLAT_LOAD_DWORD
define void @load_flat_i32(i32 addrspace(1)* noalias %out, i32 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i32 addrspace(1)* %gptr to i32 addrspace(4)*
  %fload = load i32 addrspace(4)* %fptr, align 4
  store i32 %fload, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: @load_flat_i64:
; CHECK: FLAT_LOAD_DWORDX2
define void @load_flat_i64(i64 addrspace(1)* noalias %out, i64 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i64 addrspace(1)* %gptr to i64 addrspace(4)*
  %fload = load i64 addrspace(4)* %fptr, align 4
  store i64 %fload, i64 addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: @load_flat_v4i32:
; CHECK: FLAT_LOAD_DWORDX4
define void @load_flat_v4i32(<4 x i32> addrspace(1)* noalias %out, <4 x i32> addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast <4 x i32> addrspace(1)* %gptr to <4 x i32> addrspace(4)*
  %fload = load <4 x i32> addrspace(4)* %fptr, align 4
  store <4 x i32> %fload, <4 x i32> addrspace(1)* %out, align 8
  ret void
}

; CHECK-LABEL: @sextload_flat_i8:
; CHECK: FLAT_LOAD_SBYTE
define void @sextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8 addrspace(4)* %fptr, align 4
  %ext = sext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: @zextload_flat_i8:
; CHECK: FLAT_LOAD_UBYTE
define void @zextload_flat_i8(i32 addrspace(1)* noalias %out, i8 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i8 addrspace(1)* %gptr to i8 addrspace(4)*
  %fload = load i8 addrspace(4)* %fptr, align 4
  %ext = zext i8 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: @sextload_flat_i16:
; CHECK: FLAT_LOAD_SSHORT
define void @sextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16 addrspace(4)* %fptr, align 4
  %ext = sext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}

; CHECK-LABEL: @zextload_flat_i16:
; CHECK: FLAT_LOAD_USHORT
define void @zextload_flat_i16(i32 addrspace(1)* noalias %out, i16 addrspace(1)* noalias %gptr) #0 {
  %fptr = addrspacecast i16 addrspace(1)* %gptr to i16 addrspace(4)*
  %fload = load i16 addrspace(4)* %fptr, align 4
  %ext = zext i16 %fload to i32
  store i32 %ext, i32 addrspace(1)* %out, align 4
  ret void
}



; TODO: This should not be zero when registers are used for small
; scratch allocations again.

; Check for prologue initializing special SGPRs pointing to scratch.
; CHECK-LABEL: @store_flat_scratch:
; CHECK: S_MOVK_I32 flat_scratch_lo, 0
; CHECK-NO-PROMOTE: S_MOVK_I32 flat_scratch_hi, 40
; CHECK-PROMOTE: S_MOVK_I32 flat_scratch_hi, 0
; CHECK: FLAT_STORE_DWORD
; CHECK: S_BARRIER
; CHECK: FLAT_LOAD_DWORD
define void @store_flat_scratch(i32 addrspace(1)* noalias %out, i32) #0 {
  %alloca = alloca i32, i32 9, align 4
  %x = call i32 @llvm.r600.read.tidig.x() #3
  %pptr = getelementptr i32* %alloca, i32 %x
  %fptr = addrspacecast i32* %pptr to i32 addrspace(4)*
  store i32 %x, i32 addrspace(4)* %fptr
  ; Dummy call
  call void @llvm.AMDGPU.barrier.local() #1
  %reload = load i32 addrspace(4)* %fptr, align 4
  store i32 %reload, i32 addrspace(1)* %out, align 4
  ret void
}

declare void @llvm.AMDGPU.barrier.local() #1
declare i32 @llvm.r600.read.tidig.x() #3

attributes #0 = { nounwind }
attributes #1 = { nounwind noduplicate }
attributes #3 = { nounwind readnone }