; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s
; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI %s

declare i32 @llvm.r600.read.tidig.x() #0
declare i32 @llvm.r600.read.tidig.y() #0

; In this test both the pointer and the offset operands to the
; BUFFER_LOAD instructions end up being stored in vgprs. This
; requires us to add the pointer and offset together, store the
; result in the offset operand (vaddr), and then store 0 in an
; sgpr register pair and use that for the pointer operand
; (low 64-bits of srsrc).
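;
; A rough sketch of the addr64 address math this relies on (my summary,
; stated as an assumption rather than quoted from the ISA docs):
;   address = srsrc.base (low 64 bits of srsrc) + vaddr (64-bit) + imm offset
; so with srsrc.base zeroed, the full pointer + offset sum can travel in
; vaddr.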

; GCN-LABEL: {{^}}mubuf:

; Make sure we aren't using VGPRs for the source operand of s_mov_b64
; GCN-NOT: s_mov_b64 s[{{[0-9]+:[0-9]+}}], v

; Make sure we aren't using VGPRs for the srsrc operand of BUFFER_LOAD_*
; instructions.
; GCN: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64
; GCN: buffer_load_ubyte v{{[0-9]+}}, v[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], 0 addr64

define void @mubuf(i32 addrspace(1)* %out, i8 addrspace(1)* %in) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x()
  %tmp1 = call i32 @llvm.r600.read.tidig.y()
  %tmp2 = sext i32 %tmp to i64
  %tmp3 = sext i32 %tmp1 to i64
  br label %loop

loop:                                             ; preds = %loop, %entry
  %tmp4 = phi i64 [ 0, %entry ], [ %tmp5, %loop ]
  %tmp5 = add i64 %tmp2, %tmp4
  %tmp6 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp5
  %tmp7 = load i8, i8 addrspace(1)* %tmp6, align 1
  %tmp8 = or i64 %tmp5, 1
  %tmp9 = getelementptr i8, i8 addrspace(1)* %in, i64 %tmp8
  %tmp10 = load i8, i8 addrspace(1)* %tmp9, align 1
  %tmp11 = add i8 %tmp7, %tmp10
  %tmp12 = sext i8 %tmp11 to i32
  store i32 %tmp12, i32 addrspace(1)* %out
  %tmp13 = icmp slt i64 %tmp5, 10
  br i1 %tmp13, label %loop, label %done

done:                                             ; preds = %loop
  ret void
}

; Test moving an SMRD instruction to the VALU
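; Roughly: SMRD only accepts a uniform SGPR address, so once the pointer
; ends up in VGPRs (here via the phi below), the load has to be rewritten
; as a VALU-addressed buffer_load.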

; GCN-LABEL: {{^}}smrd_valu:
; GCN: buffer_load_dword [[OUT:v[0-9]+]]
; GCN: buffer_store_dword [[OUT]]
define void @smrd_valu(i32 addrspace(2)* addrspace(1)* %in, i32 %a, i32 %b, i32 addrspace(1)* %out) #1 {
entry:
  %tmp = icmp ne i32 %a, 0
  br i1 %tmp, label %if, label %else

if:                                               ; preds = %entry
  %tmp1 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
  br label %endif

else:                                             ; preds = %entry
  %tmp2 = getelementptr i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %in
  %tmp3 = load i32 addrspace(2)*, i32 addrspace(2)* addrspace(1)* %tmp2
  br label %endif

endif:                                            ; preds = %else, %if
  %tmp4 = phi i32 addrspace(2)* [ %tmp1, %if ], [ %tmp3, %else ]
  %tmp5 = getelementptr i32, i32 addrspace(2)* %tmp4, i32 3000
  %tmp6 = load i32, i32 addrspace(2)* %tmp5
  store i32 %tmp6, i32 addrspace(1)* %out
  ret void
}

; Test moving an SMRD with an immediate offset to the VALU
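; Element 4 of the row is 4 dwords = 16 bytes in, which is what the
; offset:16 below checks for.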

; GCN-LABEL: {{^}}smrd_valu2:
; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
define void @smrd_valu2(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %tmp, i32 4
  %tmp3 = load i32, i32 addrspace(2)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; Use an offset big enough to require the 32-bit SMRD literal offset on CI.
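; 5000 dwords * 4 bytes = 20000 = 0x4e20, the constant checked for below.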
; GCN-LABEL: {{^}}smrd_valu_ci_offset:
; GCN: s_movk_i32 s[[OFFSET:[0-9]+]], 0x4e20{{$}}
; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET]]:{{[0-9]+}}], 0 addr64{{$}}
; GCN: v_add_i32_e32
; GCN: buffer_store_dword
define void @smrd_valu_ci_offset(i32 addrspace(1)* %out, i32 addrspace(2)* %in, i32 %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr i32, i32 addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr i32, i32 addrspace(2)* %tmp2, i32 5000
  %tmp4 = load i32, i32 addrspace(2)* %tmp3
  %tmp5 = add i32 %tmp4, %c
  store i32 %tmp5, i32 addrspace(1)* %out
  ret void
}

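; Same pattern with i64 elements: 5000 * 8 bytes = 40000 = 0x9c40.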
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x2:
; GCN: s_mov_b32 s[[OFFSET:[0-9]+]], 0x9c40{{$}}
; GCN: buffer_load_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET]]:{{[0-9]+}}], 0 addr64{{$}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: buffer_store_dwordx2
define void @smrd_valu_ci_offset_x2(i64 addrspace(1)* %out, i64 addrspace(2)* %in, i64 %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr i64, i64 addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr i64, i64 addrspace(2)* %tmp2, i32 5000
  %tmp4 = load i64, i64 addrspace(2)* %tmp3
  %tmp5 = or i64 %tmp4, %c
  store i64 %tmp5, i64 addrspace(1)* %out
  ret void
}

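; <4 x i32> elements: 1234 * 16 bytes = 19744 = 0x4d20.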
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x4:
; GCN: s_movk_i32 s[[OFFSET:[0-9]+]], 0x4d20{{$}}
; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET]]:{{[0-9]+}}], 0 addr64{{$}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: buffer_store_dwordx4
define void @smrd_valu_ci_offset_x4(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(2)* %in, <4 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr <4 x i32>, <4 x i32> addrspace(2)* %tmp2, i32 1234
  %tmp4 = load <4 x i32>, <4 x i32> addrspace(2)* %tmp3
  %tmp5 = or <4 x i32> %tmp4, %c
  store <4 x i32> %tmp5, <4 x i32> addrspace(1)* %out
  ret void
}

; Original scalar load uses SGPR offset on SI and 32-bit literal on CI.

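; <8 x i32> elements: 1234 * 32 bytes = 39488 = 0x9a40, with the second
; dwordx4 half of the row 16 bytes further on at 0x9a50.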
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x8:
; GCN: s_mov_b32 s[[OFFSET0:[0-9]+]], 0x9a40{{$}}
; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET0]]:{{[0-9]+}}], 0 addr64{{$}}

; SI: s_add_i32 s[[OFFSET1:[0-9]+]], s[[OFFSET0]], 16
; SI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET1]]:{{[0-9]+}}], 0 addr64{{$}}

; CI: s_mov_b32 s[[OFFSET1:[0-9]+]], 0x9a50{{$}}
; CI: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET1]]:{{[0-9]+}}], 0 addr64{{$}}

; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
define void @smrd_valu_ci_offset_x8(<8 x i32> addrspace(1)* %out, <8 x i32> addrspace(2)* %in, <8 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr <8 x i32>, <8 x i32> addrspace(2)* %tmp2, i32 1234
  %tmp4 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp3
  %tmp5 = or <8 x i32> %tmp4, %c
  store <8 x i32> %tmp5, <8 x i32> addrspace(1)* %out
  ret void
}

; FIXME: Should use an immediate offset instead of s_add_i32 to add the constant.
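; <16 x i32> elements: 1234 * 64 bytes = 78976 = 0x13480, with the other
; three dwordx4 rows at +16 (0x13490), +32 (0x134a0) and +48.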
; GCN-LABEL: {{^}}smrd_valu_ci_offset_x16:

; GCN: s_mov_b32 s[[OFFSET0:[0-9]+]], 0x13480{{$}}
; SI: s_add_i32 s[[OFFSET1:[0-9]+]], s[[OFFSET0]], 16
; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET0]]:{{[0-9]+}}], 0 addr64{{$}}

; CI: s_mov_b32 s[[OFFSET1:[0-9]+]], 0x13490{{$}}
; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET1]]:{{[0-9]+}}], 0 addr64{{$}}

; SI: s_add_i32 s[[OFFSET2:[0-9]+]], s[[OFFSET0]], 32
; CI: s_mov_b32 s[[OFFSET2:[0-9]+]], 0x134a0

; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET2]]:{{[0-9]+}}], 0 addr64{{$}}
; GCN: s_add_i32 s[[OFFSET3:[0-9]+]], s[[OFFSET2]], 16
; GCN: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET3]]:{{[0-9]+}}], 0 addr64{{$}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: v_or_b32_e32 {{v[0-9]+}}, {{s[0-9]+}}, {{v[0-9]+}}
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
; GCN: buffer_store_dword
define void @smrd_valu_ci_offset_x16(<16 x i32> addrspace(1)* %out, <16 x i32> addrspace(2)* %in, <16 x i32> %c) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp2 = getelementptr <16 x i32>, <16 x i32> addrspace(2)* %in, i32 %tmp
  %tmp3 = getelementptr <16 x i32>, <16 x i32> addrspace(2)* %tmp2, i32 1234
  %tmp4 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp3
  %tmp5 = or <16 x i32> %tmp4, %c
  store <16 x i32> %tmp5, <16 x i32> addrspace(1)* %out
  ret void
}

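; Once the loaded value lives in a VGPR, its SALU user must move to the
; VALU as well (s_add -> v_add), which the v_add_i32_e32 check below
; verifies.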
; GCN-LABEL: {{^}}smrd_valu2_salu_user:
; GCN: buffer_load_dword [[MOVED:v[0-9]+]], v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:16{{$}}
; GCN: v_add_i32_e32 [[ADD:v[0-9]+]], vcc, s{{[0-9]+}}, [[MOVED]]
; GCN: buffer_store_dword [[ADD]]
define void @smrd_valu2_salu_user(i32 addrspace(1)* %out, [8 x i32] addrspace(2)* %in, i32 %a) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [8 x i32], [8 x i32] addrspace(2)* %in, i32 %tmp, i32 4
  %tmp3 = load i32, i32 addrspace(2)* %tmp2
  %tmp4 = add i32 %tmp3, %a
  store i32 %tmp4, i32 addrspace(1)* %out
  ret void
}

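; Index 255 is 255 * 4 = 1020 bytes, the largest offset the original
; scalar load could encode (assuming SI SMRD's 8-bit dword offset field);
; the MUBUF replacement checks for offset:1020.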
; GCN-LABEL: {{^}}smrd_valu2_max_smrd_offset:
; GCN: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1020{{$}}
define void @smrd_valu2_max_smrd_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(2)* %in, i32 %tmp, i32 255
  %tmp3 = load i32, i32 addrspace(2)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

; The offset is too big to fit in the SMRD 8-bit offset, but small
; enough to fit in the MUBUF offset.
; FIXME: We should be using the MUBUF offset, but we don't.
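; Index 256 is 256 * 4 = 1024 = 0x400 bytes: one dword past the assumed
; 255-dword SMRD limit, but still within the MUBUF immediate offset
; range, as the CI check's offset:1024 shows.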

; GCN-LABEL: {{^}}smrd_valu2_mubuf_offset:
; SI: s_movk_i32 s[[OFFSET:[0-9]+]], 0x400{{$}}
; SI: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[OFFSET]]:{{[0-9]+\]}}, 0 addr64{{$}}

; CI: buffer_load_dword v{{[0-9]+}}, v{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:1024{{$}}
define void @smrd_valu2_mubuf_offset(i32 addrspace(1)* %out, [1024 x i32] addrspace(2)* %in) #1 {
entry:
  %tmp = call i32 @llvm.r600.read.tidig.x() #0
  %tmp1 = add i32 %tmp, 4
  %tmp2 = getelementptr [1024 x i32], [1024 x i32] addrspace(2)* %in, i32 %tmp, i32 256
  %tmp3 = load i32, i32 addrspace(2)* %tmp2
  store i32 %tmp3, i32 addrspace(1)* %out
  ret void
}

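; An <8 x i32> load is 32 bytes, so after the move to the VALU it splits
; into two dwordx4 buffer loads.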
; GCN-LABEL: {{^}}s_load_imm_v8i32:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
define void @s_load_imm_v8i32(<8 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp2, align 4
  store <8 x i32> %tmp3, <8 x i32> addrspace(1)* %out, align 32
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v8i32_salu_user:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: buffer_store_dword
define void @s_load_imm_v8i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x()
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <8 x i32> addrspace(2)*
  %tmp3 = load <8 x i32>, <8 x i32> addrspace(2)* %tmp2, align 4

  %elt0 = extractelement <8 x i32> %tmp3, i32 0
  %elt1 = extractelement <8 x i32> %tmp3, i32 1
  %elt2 = extractelement <8 x i32> %tmp3, i32 2
  %elt3 = extractelement <8 x i32> %tmp3, i32 3
  %elt4 = extractelement <8 x i32> %tmp3, i32 4
  %elt5 = extractelement <8 x i32> %tmp3, i32 5
  %elt6 = extractelement <8 x i32> %tmp3, i32 6
  %elt7 = extractelement <8 x i32> %tmp3, i32 7

  %add0 = add i32 %elt0, %elt1
  %add1 = add i32 %add0, %elt2
  %add2 = add i32 %add1, %elt3
  %add3 = add i32 %add2, %elt4
  %add4 = add i32 %add3, %elt5
  %add5 = add i32 %add4, %elt6
  %add6 = add i32 %add5, %elt7

  store i32 %add6, i32 addrspace(1)* %out
  ret void
}

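; Same with <16 x i32>: 64 bytes -> four dwordx4 buffer loads.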
; GCN-LABEL: {{^}}s_load_imm_v16i32:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
define void @s_load_imm_v16i32(<16 x i32> addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp2, align 4
  store <16 x i32> %tmp3, <16 x i32> addrspace(1)* %out, align 32
  ret void
}

; GCN-LABEL: {{^}}s_load_imm_v16i32_salu_user:
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: buffer_load_dwordx4
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: v_add_i32_e32
; GCN: buffer_store_dword
define void @s_load_imm_v16i32_salu_user(i32 addrspace(1)* %out, i32 addrspace(2)* nocapture readonly %in) #1 {
entry:
  %tmp0 = tail call i32 @llvm.r600.read.tidig.x() #1
  %tmp1 = getelementptr inbounds i32, i32 addrspace(2)* %in, i32 %tmp0
  %tmp2 = bitcast i32 addrspace(2)* %tmp1 to <16 x i32> addrspace(2)*
  %tmp3 = load <16 x i32>, <16 x i32> addrspace(2)* %tmp2, align 4

  %elt0 = extractelement <16 x i32> %tmp3, i32 0
  %elt1 = extractelement <16 x i32> %tmp3, i32 1
  %elt2 = extractelement <16 x i32> %tmp3, i32 2
  %elt3 = extractelement <16 x i32> %tmp3, i32 3
  %elt4 = extractelement <16 x i32> %tmp3, i32 4
  %elt5 = extractelement <16 x i32> %tmp3, i32 5
  %elt6 = extractelement <16 x i32> %tmp3, i32 6
  %elt7 = extractelement <16 x i32> %tmp3, i32 7
  %elt8 = extractelement <16 x i32> %tmp3, i32 8
  %elt9 = extractelement <16 x i32> %tmp3, i32 9
  %elt10 = extractelement <16 x i32> %tmp3, i32 10
  %elt11 = extractelement <16 x i32> %tmp3, i32 11
  %elt12 = extractelement <16 x i32> %tmp3, i32 12
  %elt13 = extractelement <16 x i32> %tmp3, i32 13
  %elt14 = extractelement <16 x i32> %tmp3, i32 14
  %elt15 = extractelement <16 x i32> %tmp3, i32 15

  %add0 = add i32 %elt0, %elt1
  %add1 = add i32 %add0, %elt2
  %add2 = add i32 %add1, %elt3
  %add3 = add i32 %add2, %elt4
  %add4 = add i32 %add3, %elt5
  %add5 = add i32 %add4, %elt6
  %add6 = add i32 %add5, %elt7
  %add7 = add i32 %add6, %elt8
  %add8 = add i32 %add7, %elt9
  %add9 = add i32 %add8, %elt10
  %add10 = add i32 %add9, %elt11
  %add11 = add i32 %add10, %elt12
  %add12 = add i32 %add11, %elt13
  %add13 = add i32 %add12, %elt14
  %add14 = add i32 %add13, %elt15

  store i32 %add14, i32 addrspace(1)* %out
  ret void
}

attributes #0 = { nounwind readnone }
attributes #1 = { nounwind }