; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s

; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.
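
; Dynamic indexing selects to v_movrels_b32 (extract) and v_movreld_b32
; (insert), which take the source or destination element offset from the
; m0 register.
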
; CHECK-LABEL: {{^}}extract_w_offset:
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movrels_b32_e32
define void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %idx = add i32 %in, 1
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %idx
  store float %elt, float addrspace(1)* %out
  ret void
}

; XXX: Could do v_or_b32 directly
; CHECK-LABEL: {{^}}extract_w_offset_salu_use_vector:
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movrels_b32_e32
define void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
entry:
  %idx = add i32 %in, 1
  %vec = or <4 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4>
  %elt = extractelement <4 x i32> %vec, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_wo_offset:
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movrels_b32_e32
define void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
  store float %elt, float addrspace(1)* %out
  ret void
}
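
; Note the negative offset checks below only pin the high bits: -512 is
; 0xfffffe00, and the low byte varies with which register holds element 0
; of the vector.
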
; CHECK-LABEL: {{^}}extract_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movrels_b32_e32 v{{[0-9]}}, v0
define void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_neg_offset_sgpr_loaded:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movrels_b32_e32 v{{[0-9]}}, v0
define void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %or = or <4 x i32> %vec0, %vec1
  %value = extractelement <4 x i32> %or, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
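
; With a divergent (VGPR) index, m0 cannot be set with a single scalar
; write. The expansion reads the index from the first active lane with
; v_readfirstlane, runs the movrel for all lanes matching that index,
; masks those lanes off, and loops until no active lanes remain.
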
; CHECK-LABEL: {{^}}extract_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: v_readfirstlane_b32
; CHECK: s_add_i32 m0, m0, 0xfffffe{{[0-9a-z]+}}
; CHECK-NEXT: v_movrels_b32_e32 v{{[0-9]}}, v0
; CHECK: s_cbranch_execnz
define void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
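
; Nothing stable to check beyond the label: with an undef index the
; extract may be selected to any element or folded away entirely.
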
; CHECK-LABEL: {{^}}extract_undef_offset_sgpr:
define void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = extractelement <4 x i32> %ld, i32 undef
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_undef_offset_sgpr_vector_src:
; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK-NEXT: v_movreld_b32
define void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = insertelement <4 x i32> %ld, i32 5, i32 undef
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}
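
; The insertelement cases mirror the extracts: v_movreld_b32 writes to
; the vector register selected by m0 instead of reading from it.
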
; CHECK-LABEL: {{^}}insert_w_offset:
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movreld_b32_e32
define void @insert_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %0 = add i32 %in, 1
  %1 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %0
  %2 = extractelement <4 x float> %1, i32 2
  store float %2, float addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_wo_offset:
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movreld_b32_e32
define void @insert_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %0 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
  %1 = extractelement <4 x float> %0, i32 2
  store float %1, float addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movreld_b32_e32 v0, v{{[0-9]}}
define void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; The vector indexed into is originally loaded into an SGPR rather
; than built with a reg_sequence.

; CHECK-LABEL: {{^}}insert_neg_offset_sgpr_loadreg:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movreld_b32_e32 v0, v{{[0-9]}}
define void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> %vec, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: v_readfirstlane_b32
; CHECK: s_add_i32 m0, m0, 0xfffffe{{[0-9a-z]+}}
; CHECK-NEXT: v_movreld_b32_e32 v0, v{{[0-9]}}
; CHECK: s_cbranch_execnz
define void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_inline_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: v_readfirstlane_b32
; CHECK: s_add_i32 m0, m0, -{{[0-9]+}}
; CHECK-NEXT: v_movreld_b32_e32 v0, v{{[0-9]}}
; CHECK: s_cbranch_execnz
define void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -16
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; When the block is split to insert the loop, make sure any other
; places that need to be expanded in the same block are also handled.
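
; Each of the two divergent indices below gets its own waterfall loop
; (LOOP0 and LOOP1), with exec restored in between.
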
; CHECK-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:

; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; CHECK-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
; CHECK-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]
; CHECK: s_waitcnt vmcnt(0)

; CHECK: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec

; CHECK: [[LOOP0:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
; CHECK: s_mov_b32 m0, vcc_lo
; CHECK: v_cmp_eq_u32_e32 vcc, m0, [[IDX0]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; CHECK: s_mov_b64 exec, [[MASK]]
; CHECK: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec

; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
; CHECK: s_mov_b32 m0, vcc_lo
; CHECK: v_cmp_eq_u32_e32 vcc, m0, [[IDX0]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP1]]

; CHECK: buffer_store_dword [[MOVREL0]]
; CHECK: buffer_store_dword [[MOVREL1]]
define void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %val0 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx0
  %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={SGPR4}" ()
  %val1 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx1
  store volatile i32 %val0, i32 addrspace(1)* %out0
  store volatile i32 %val1, i32 addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.reg, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; CHECK-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
; CHECK-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], s[[S_ELT0]]
; CHECK-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62
; CHECK-DAG: s_waitcnt vmcnt(0)

; CHECK: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec

; CHECK: [[LOOP0:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
; CHECK: s_mov_b32 m0, vcc_lo
; CHECK: v_cmp_eq_u32_e32 vcc, m0, [[IDX0]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movreld_b32_e32 v[[MOVREL0:[0-9]+]], [[INS0]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; CHECK: s_mov_b64 exec, [[MASK]]
; CHECK: v_mov_b32_e32 [[INS1:v[0-9]+]], 63
; CHECK: s_mov_b64 [[MASK]], exec

; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 vcc_lo, [[IDX0]]
; CHECK: s_mov_b32 m0, vcc_lo
; CHECK: v_cmp_eq_u32_e32 vcc, m0, [[IDX0]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movreld_b32_e32 v[[MOVREL1:[0-9]+]], [[INS1]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP1]]

; CHECK: buffer_store_dwordx4 v{{\[}}[[MOVREL0]]:

; CHECK: buffer_store_dword [[INS0]]
define void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %live.out.val = call i32 asm sideeffect "v_mov_b32 $0, 62", "=v"()
  %vec1 = insertelement <4 x i32> %vec0, i32 %live.out.val, i32 %idx0
  %vec2 = insertelement <4 x i32> %vec1, i32 63, i32 %idx1
  store volatile <4 x i32> %vec2, <4 x i32> addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.val, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}
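
; The expansion also has to work when the dynamic accesses sit in
; separate blocks joined by a phi; each block gets its own m0 setup.
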
; CHECK-LABEL: {{^}}extract_adjacent_blocks:
; CHECK: s_load_dword [[ARG:s[0-9]+]]
; CHECK: s_cmp_lg_i32
; CHECK: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movrels_b32_e32
; CHECK: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; CHECK: [[BB4]]:
; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movrels_b32_e32

; CHECK: [[ENDBB]]:
; CHECK: buffer_store_dword
; CHECK: s_endpgm
define void @extract_adjacent_blocks(i32 %arg) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = extractelement <4 x float> %tmp2, i32 undef
  br label %bb7

bb4:
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = extractelement <4 x float> %tmp5, i32 undef
  br label %bb7

bb7:
  %tmp8 = phi float [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile float %tmp8, float addrspace(1)* undef
  ret void
}

; CHECK-LABEL: {{^}}insert_adjacent_blocks:
; CHECK: s_load_dword [[ARG:s[0-9]+]]
; CHECK: s_cmp_lg_i32
; CHECK: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movreld_b32_e32
; CHECK: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; CHECK: [[BB4]]:
; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movreld_b32_e32

; CHECK: [[ENDBB]]:
; CHECK: buffer_store_dword
; CHECK: s_endpgm
define void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:                                              ; preds = %bb
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = insertelement <4 x float> %tmp2, float %val0, i32 undef
  br label %bb7

bb4:                                              ; preds = %bb
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = insertelement <4 x float> %tmp5, float %val0, i32 undef
  br label %bb7

bb7:                                              ; preds = %bb4, %bb1
  %tmp8 = phi <4 x float> [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile <4 x float> %tmp8, <4 x float> addrspace(1)* undef
  ret void
}

; FIXME: Should be able to fold zero input to movreld to inline imm?

; CHECK-LABEL: {{^}}multi_same_block:
; CHECK: s_load_dword [[ARG:s[0-9]+]]
; CHECK-DAG: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; CHECK-DAG: s_add_i32 m0, [[ARG]], -16
; CHECK: v_movreld_b32_e32 v{{[0-9]+}}, [[ZERO]]

; CHECK: s_add_i32 m0, [[ARG]], -14
; CHECK: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}

; CHECK: s_mov_b32 m0, -1
; CHECK: ds_write_b32
; CHECK: ds_write_b32
; CHECK: s_endpgm
define void @multi_same_block(i32 %arg) #0 {
bb:
  %tmp1 = add i32 %arg, -16
  %tmp2 = insertelement <6 x float> <float 1.700000e+01, float 1.800000e+01, float 1.900000e+01, float 2.000000e+01, float 2.100000e+01, float 2.200000e+01>, float 0.000000e+00, i32 %tmp1
  %tmp3 = add i32 %arg, -16
  %tmp4 = insertelement <6 x float> <float 0x40311999A0000000, float 0x40321999A0000000, float 0x40331999A0000000, float 0x40341999A0000000, float 0x40351999A0000000, float 0x40361999A0000000>, float 0x3FB99999A0000000, i32 %tmp3
  %tmp5 = bitcast <6 x float> %tmp2 to <6 x i32>
  %tmp6 = extractelement <6 x i32> %tmp5, i32 1
  %tmp7 = bitcast <6 x float> %tmp4 to <6 x i32>
  %tmp8 = extractelement <6 x i32> %tmp7, i32 5
  store volatile i32 %tmp6, i32 addrspace(3)* undef, align 4
  store volatile i32 %tmp8, i32 addrspace(3)* undef, align 4
  ret void
}

; CHECK-LABEL: {{^}}extract_largest_inbounds_offset:
; CHECK: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; CHECK: s_load_dword [[IDX:s[0-9]+]]
; CHECK: s_mov_b32 m0, [[IDX]]
; CHECK-NEXT: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
; CHECK: buffer_store_dword [[EXTRACT]]
define void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 3
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; The offset puts the index outside of the superregister boundaries, so
; clamp to the first element.
; CHECK-LABEL: {{^}}extract_out_of_bounds_offset:
; CHECK: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; CHECK: s_load_dword [[IDX:s[0-9]+]]
; CHECK: s_add_i32 m0, [[IDX]], 4
; CHECK-NEXT: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
; CHECK: buffer_store_dword [[EXTRACT]]
define void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 4
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }