blob: 5f5978c87b21755c85bb5c91b94dcf242c5b8503 [file] [log] [blame]
; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck %s

; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.

; Dynamic extract with a +1 offset folded into the index; the index is
; materialized in m0 and the element is read with v_movrels.
; CHECK-LABEL: {{^}}extract_w_offset:
; CHECK-DAG: s_load_dword [[IN:s[0-9]+]]
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; CHECK-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 2.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
; CHECK-DAG: s_mov_b32 m0, [[IN]]
; CHECK: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
define void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %idx = add i32 %in, 1
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %idx
  store float %elt, float addrspace(1)* %out
  ret void
}

; XXX: Could do v_or_b32 directly
; CHECK-LABEL: {{^}}extract_w_offset_salu_use_vector:
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: s_or_b32
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; CHECK: s_mov_b32 m0
; CHECK-NEXT: v_movrels_b32_e32
define void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
entry:
  %idx = add i32 %in, 1
  %vec = or <4 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4>
  %elt = extractelement <4 x i32> %vec, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; Same as extract_w_offset but with no add on the index.
; CHECK-LABEL: {{^}}extract_wo_offset:
; CHECK-DAG: s_load_dword [[IN:s[0-9]+]]
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
; CHECK-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 1.0
; CHECK-DAG: s_mov_b32 m0, [[IN]]
; CHECK: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
define void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
  store float %elt, float addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movrels_b32_e32 v{{[0-9]}}, v0
define void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_neg_offset_sgpr_loaded:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movrels_b32_e32 v{{[0-9]}}, v0
define void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %or = or <4 x i32> %vec0, %vec1
  %value = extractelement <4 x i32> %or, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.

; FIXME: The waitcnt for the argument load can go after the loop
; CHECK: s_mov_b64 s{{\[[0-9]+:[0-9]+\]}}, exec
; CHECK: s_waitcnt lgkmcnt(0)

; CHECK: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v{{[0-9]+}}
; CHECK: s_add_i32 m0, [[READLANE]], 0xfffffe0
; CHECK: v_movrels_b32_e32 [[RESULT:v[0-9]+]], v1
; CHECK: s_cbranch_execnz

; CHECK: buffer_store_dword [[RESULT]]
define void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Undef dynamic index: no output checks, only verifies this compiles.
; CHECK-LABEL: {{^}}extract_undef_offset_sgpr:
define void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = extractelement <4 x i32> %ld, i32 undef
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_undef_offset_sgpr_vector_src:
; CHECK-DAG: buffer_load_dwordx4
; CHECK-DAG: s_mov_b32 m0,
; CHECK: v_movreld_b32
define void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = insertelement <4 x i32> %ld, i32 5, i32 undef
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; Dynamic insert with a +1 offset; index goes to m0, element written with
; v_movreld.
; CHECK-LABEL: {{^}}insert_w_offset:
; CHECK: s_load_dword [[IN:s[0-9]+]]
; CHECK: s_mov_b32 m0, [[IN]]
; CHECK: v_movreld_b32_e32
define void @insert_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %0 = add i32 %in, 1
  %1 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %0
  %2 = extractelement <4 x float> %1, i32 2
  store float %2, float addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_wo_offset:
; CHECK: s_load_dword [[IN:s[0-9]+]]
; CHECK: s_mov_b32 m0, [[IN]]
; CHECK: v_movreld_b32_e32
define void @insert_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %0 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
  %1 = extractelement <4 x float> %0, i32 2
  store float %1, float addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movreld_b32_e32 v0, 5
define void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; The vector indexed into is originally loaded into an SGPR rather
; than built with a reg_sequence

; CHECK-LABEL: {{^}}insert_neg_offset_sgpr_loadreg:
; The offset depends on the register that holds the first element of the vector.
; CHECK: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; CHECK: v_movreld_b32_e32 v0, 5
define void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> %vec, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.

; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}

; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: s_waitcnt lgkmcnt(0)

; CHECK: [[LOOPBB:BB[0-9]+_[0-9]+]]:
; CHECK: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
; CHECK: s_add_i32 m0, [[READLANE]], 0xfffffe00
; CHECK: v_movreld_b32_e32 [[VEC_ELT0]], 5
; CHECK: s_cbranch_execnz [[LOOPBB]]

; CHECK: s_mov_b64 exec, [[SAVEEXEC]]
; CHECK: buffer_store_dword
define void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insert_neg_inline_offset_vgpr:

; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
; CHECK-DAG: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x1f4{{$}}

; CHECK: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: s_waitcnt lgkmcnt(0)

; The offset depends on the register that holds the first element of the vector.
; CHECK: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
; CHECK: s_add_i32 m0, [[READLANE]], -16
; CHECK: v_movreld_b32_e32 [[VEC_ELT0]], [[VAL]]
; CHECK: s_cbranch_execnz
define void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -16
  %value = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 500, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; When the block is split to insert the loop, make sure any other
; places that need to be expanded in the same block are also handled.

; CHECK-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:

; FIXME: Why is vector copied in between?

; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; CHECK-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
; CHECK-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
; CHECK-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]

; CHECK: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec
; CHECK: s_waitcnt vmcnt(0) lgkmcnt(0)

; CHECK: [[LOOP0:BB[0-9]+_[0-9]+]]:
; CHECK-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; CHECK: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; CHECK: s_mov_b32 m0, [[READLANE]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK-NEXT: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; CHECK: s_mov_b64 exec, [[MASK]]
; CHECK: v_mov_b32_e32 [[VEC_ELT1_2:v[0-9]+]], [[S_ELT1]]
; CHECK: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec

; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
; CHECK-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; CHECK: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; CHECK: s_mov_b32 m0, [[READLANE]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP1]]

; CHECK: buffer_store_dword [[MOVREL0]]
; CHECK: buffer_store_dword [[MOVREL1]]
define void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %val0 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx0
  %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={SGPR4}" ()
  %val1 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx1
  store volatile i32 %val0, i32 addrspace(1)* %out0
  store volatile i32 %val1, i32 addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.reg, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; CHECK-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
; CHECK-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
; CHECK-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; CHECK-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62

; CHECK-DAG: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]
; CHECK-DAG: v_mov_b32_e32 v[[VEC_ELT3:[0-9]+]], s[[S_ELT3]]

; CHECK: [[LOOP0:BB[0-9]+_[0-9]+]]:
; CHECK-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; CHECK: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; CHECK: s_mov_b32 m0, [[READLANE]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; CHECK-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; CHECK: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]
; CHECK: s_mov_b64 [[MASK]], exec

; CHECK: [[LOOP1:BB[0-9]+_[0-9]+]]:
; CHECK-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; CHECK: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; CHECK: s_mov_b32 m0, [[READLANE]]
; CHECK: s_and_saveexec_b64 vcc, vcc
; NOTE(review): [[VEC_ELT1]] has no defining CHECK above in this view of the
; test — verify against the upstream file that its definition wasn't lost.
; CHECK-NEXT: v_movreld_b32_e32 [[VEC_ELT1]], 63
; CHECK-NEXT: s_xor_b64 exec, exec, vcc
; CHECK: s_cbranch_execnz [[LOOP1]]

; CHECK: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:

; CHECK: buffer_store_dword [[INS0]]
define void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %live.out.val = call i32 asm sideeffect "v_mov_b32 $0, 62", "=v"()
  %vec1 = insertelement <4 x i32> %vec0, i32 %live.out.val, i32 %idx0
  %vec2 = insertelement <4 x i32> %vec1, i32 63, i32 %idx1
  store volatile <4 x i32> %vec2, <4 x i32> addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.val, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; CHECK-LABEL: {{^}}extract_adjacent_blocks:
; CHECK: s_load_dword [[ARG:s[0-9]+]]
; CHECK: s_cmp_lg_i32
; CHECK: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movrels_b32_e32
; CHECK: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; CHECK: [[BB4]]:
; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movrels_b32_e32

; CHECK: [[ENDBB]]:
; CHECK: buffer_store_dword
; CHECK: s_endpgm
define void @extract_adjacent_blocks(i32 %arg) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = extractelement <4 x float> %tmp2, i32 undef
  br label %bb7

bb4:
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = extractelement <4 x float> %tmp5, i32 undef
  br label %bb7

bb7:
  %tmp8 = phi float [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile float %tmp8, float addrspace(1)* undef
  ret void
}

; CHECK-LABEL: {{^}}insert_adjacent_blocks:
; CHECK: s_load_dword [[ARG:s[0-9]+]]
; CHECK: s_cmp_lg_i32
; CHECK: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movreld_b32_e32
; CHECK: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; CHECK: [[BB4]]:
; CHECK: buffer_load_dwordx4
; CHECK: s_mov_b32 m0,
; CHECK: v_movreld_b32_e32

; CHECK: [[ENDBB]]:
; CHECK: buffer_store_dword
; CHECK: s_endpgm
define void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:                                              ; preds = %bb
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = insertelement <4 x float> %tmp2, float %val0, i32 undef
  br label %bb7

bb4:                                              ; preds = %bb
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = insertelement <4 x float> %tmp5, float %val0, i32 undef
  br label %bb7

bb7:                                              ; preds = %bb4, %bb1
  %tmp8 = phi <4 x float> [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile <4 x float> %tmp8, <4 x float> addrspace(1)* undef
  ret void
}

; FIXME: Should be able to fold zero input to movreld to inline imm?

; CHECK-LABEL: {{^}}multi_same_block:

; CHECK-DAG: v_mov_b32_e32 v[[VEC0_ELT0:[0-9]+]], 0x41880000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41900000
; CHECK-DAG: v_mov_b32_e32 v[[VEC0_ELT2:[0-9]+]], 0x41980000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a00000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a80000
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b00000
; CHECK-DAG: s_load_dword [[ARG:s[0-9]+]]

; CHECK-DAG: s_add_i32 m0, [[ARG]], -16
; CHECK: v_movreld_b32_e32 v[[VEC0_ELT0]], 4.0
; CHECK-NOT: m0

; CHECK: v_mov_b32_e32 v[[VEC0_ELT2]], 0x4188cccd
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4190cccd
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4198cccd
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a0cccd
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a8cccd
; CHECK-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b0cccd
; CHECK: v_movreld_b32_e32 v[[VEC0_ELT2]], -4.0

; CHECK: s_mov_b32 m0, -1
; CHECK: ds_write_b32
; CHECK: ds_write_b32
; CHECK: s_endpgm
define void @multi_same_block(i32 %arg) #0 {
bb:
  %tmp1 = add i32 %arg, -16
  %tmp2 = insertelement <6 x float> <float 1.700000e+01, float 1.800000e+01, float 1.900000e+01, float 2.000000e+01, float 2.100000e+01, float 2.200000e+01>, float 4.000000e+00, i32 %tmp1
  %tmp3 = add i32 %arg, -16
  %tmp4 = insertelement <6 x float> <float 0x40311999A0000000, float 0x40321999A0000000, float 0x40331999A0000000, float 0x40341999A0000000, float 0x40351999A0000000, float 0x40361999A0000000>, float -4.0, i32 %tmp3
  %tmp5 = bitcast <6 x float> %tmp2 to <6 x i32>
  %tmp6 = extractelement <6 x i32> %tmp5, i32 1
  %tmp7 = bitcast <6 x float> %tmp4 to <6 x i32>
  %tmp8 = extractelement <6 x i32> %tmp7, i32 5
  store volatile i32 %tmp6, i32 addrspace(3)* undef, align 4
  store volatile i32 %tmp8, i32 addrspace(3)* undef, align 4
  ret void
}

; offset puts outside of superregister boundaries, so clamp to 1st element.
; CHECK-LABEL: {{^}}extract_largest_inbounds_offset:
; CHECK-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; CHECK-DAG: s_load_dword [[IDX:s[0-9]+]]
; CHECK: s_mov_b32 m0, [[IDX]]
; CHECK: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
; CHECK: buffer_store_dword [[EXTRACT]]
define void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 3
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}extract_out_of_bounds_offset:
; CHECK-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; CHECK-DAG: s_load_dword [[IDX:s[0-9]+]]
; CHECK: s_add_i32 m0, [[IDX]], 4
; CHECK: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
; CHECK: buffer_store_dword [[EXTRACT]]
define void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 4
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Test that the or is folded into the base address register instead of
; added to m0

; CHECK-LABEL: {{^}}extractelement_v4i32_or_index:
; CHECK: s_load_dword [[IDX_IN:s[0-9]+]]
; CHECK: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; CHECK-NOT: [[IDX_SHL]]
; CHECK: s_mov_b32 m0, [[IDX_SHL]]
; CHECK: v_movrels_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
define void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %value = extractelement <4 x i32> %ld, i32 %idx
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; CHECK-LABEL: {{^}}insertelement_v4f32_or_index:
; CHECK: s_load_dword [[IDX_IN:s[0-9]+]]
; CHECK: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; CHECK-NOT: [[IDX_SHL]]
; CHECK: s_mov_b32 m0, [[IDX_SHL]]
; CHECK: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
define void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %idx.in) nounwind {
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %idx
  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; CHECK-LABEL: {{^}}broken_phi_bb:
; CHECK: v_mov_b32_e32 [[PHIREG:v[0-9]+]], 8

; CHECK: s_branch [[BB2:BB[0-9]+_[0-9]+]]

; CHECK: {{^BB[0-9]+_[0-9]+}}:
; CHECK: s_mov_b64 exec,

; CHECK: [[BB2]]:
; CHECK: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, [[PHIREG]]
; CHECK: buffer_load_dword

; CHECK: [[REGLOOP:BB[0-9]+_[0-9]+]]:
; CHECK: v_movreld_b32_e32
; CHECK: s_cbranch_execnz [[REGLOOP]]
define void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
bb:
  br label %bb2

bb2:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 8, %bb ], [ %tmp7, %bb4 ]
  %tmp3 = icmp slt i32 %tmp, %arg
  br i1 %tmp3, label %bb4, label %bb8

bb4:                                              ; preds = %bb2
  %vgpr = load volatile i32, i32 addrspace(1)* undef
  %tmp5 = insertelement <8 x i32> undef, i32 undef, i32 %vgpr
  %tmp6 = insertelement <8 x i32> %tmp5, i32 %arg1, i32 %vgpr
  %tmp7 = extractelement <8 x i32> %tmp6, i32 0
  br label %bb2

bb8:                                              ; preds = %bb2
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }