; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=MOVREL %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -amdgpu-vgpr-index-mode -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=IDXMODE %s

; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.

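; As a rough sketch (illustrative only, not checked output), the two lowering
; strategies the prefixes above distinguish look like this for a dynamic
; extract:
;
;   MOVREL (SI/VI):                    IDXMODE (gfx900, or the
;                                      -amdgpu-vgpr-index-mode flag):
;     s_mov_b32 m0, s_idx                s_set_gpr_idx_on s_idx, src0
;     v_movrels_b32_e32 v_dst, v_base    v_mov_b32_e32 v_dst, v_base
;                                        s_set_gpr_idx_off
;
; The s_idx/v_base/v_dst names here are placeholders, not registers the
; checks below rely on.
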
; GCN-LABEL: {{^}}extract_w_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 2.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0

; MOVREL-DAG: s_mov_b32 m0, [[IN]]
; MOVREL: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]

; IDXMODE: s_set_gpr_idx_on [[IN]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, [[BASEREG]]
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %idx = add i32 %in, 1
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %idx
  store float %elt, float addrspace(1)* %out
  ret void
}
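
; Note how the constant +1 in the index is folded away above: rather than
; adding 1 to the index register, the element-1 value (2.0, bound to
; [[BASEREG]]) is used as the base of the indexed move.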

; XXX: Could do v_or_b32 directly
; GCN-LABEL: {{^}}extract_w_offset_salu_use_vector:
; MOVREL: s_mov_b32 m0
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}

; MOVREL: v_movrels_b32_e32

; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
entry:
  %idx = add i32 %in, 1
  %vec = or <4 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4>
  %elt = extractelement <4 x i32> %vec, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_wo_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 1.0

; MOVREL-DAG: s_mov_b32 m0, [[IN]]
; MOVREL: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]

; IDXMODE: s_set_gpr_idx_on [[IN]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, [[BASEREG]]
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
  store float %elt, float addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; MOVREL: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; MOVREL: v_movrels_b32_e32 v{{[0-9]}}, v0

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: v_mov_b32_e32 v2, 2
; IDXMODE: v_mov_b32_e32 v3, 3
; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
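
; The arithmetic behind the checks above: the IR index is %offset - 512, and
; -512 as a 32-bit value is 0xfffffe00, which is what the MOVREL s_add_i32
; literal matches; the IDXMODE path instead uses s_addk_i32 with the 16-bit
; immediate 0xfe00.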

; GCN-LABEL: {{^}}extract_neg_offset_sgpr_loaded:
; The offset depends on the register that holds the first element of the vector.
; MOVREL: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; MOVREL: v_movrels_b32_e32 v{{[0-9]}}, v0

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: v_mov_b32_e32 v0,
; IDXMODE: v_mov_b32_e32 v1,
; IDXMODE: v_mov_b32_e32 v2,
; IDXMODE: v_mov_b32_e32 v3,
; IDXMODE-NEXT: s_set_gpr_idx_on [[ADD_IDX]], src0{{$}}
; IDXMODE-NEXT: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %or = or <4 x i32> %vec0, %vec1
  %value = extractelement <4 x i32> %or, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.

; FIXME: The waitcnt for the argument load can go after the loop
; IDXMODE: s_set_gpr_idx_on 0, src0
; GCN: s_mov_b64 s{{\[[0-9]+:[0-9]+\]}}, exec
; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]:
; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v{{[0-9]+}}

; MOVREL: s_add_i32 m0, [[READLANE]], 0xfffffe00
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL: v_movrels_b32_e32 [[RESULT:v[0-9]+]], v1

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00
; IDXMODE: s_set_gpr_idx_idx [[ADD_IDX]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE: v_mov_b32_e32 [[RESULT:v[0-9]+]], v1

; GCN: s_cbranch_execnz

; IDXMODE: s_set_gpr_idx_off
; GCN: buffer_store_dword [[RESULT]]
define amdgpu_kernel void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
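
; m0 (and the gpr index) is a scalar register, so a divergent VGPR index
; cannot be used directly; the loop checked above is a "waterfall" over the
; distinct index values in the wave. A rough sketch of its shape
; (illustrative, not checked output):
;
;   s_mov_b64 s[save:save+1], exec
; loop:
;   v_readfirstlane_b32 s_idx, v_idx      ; pick one lane's index
;   v_cmp_eq_u32 vcc, s_idx, v_idx        ; lanes sharing that index
;   s_and_saveexec_b64 vcc, vcc           ; execute only those lanes
;   <indexed move using scalar s_idx>
;   s_xor_b64 exec, exec, vcc             ; retire the serviced lanes
;   s_cbranch_execnz loop
;   s_mov_b64 exec, s[save:save+1]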

; GCN-LABEL: {{^}}extract_undef_offset_sgpr:
define amdgpu_kernel void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = extractelement <4 x i32> %ld, i32 undef
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_undef_offset_sgpr_vector_src:
; GCN-DAG: buffer_load_dwordx4
; MOVREL-DAG: s_mov_b32 m0,
; MOVREL: v_movreld_b32
define amdgpu_kernel void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = insertelement <4 x i32> %ld, i32 5, i32 undef
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_w_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; MOVREL-DAG: s_mov_b32 m0, [[IN]]
; GCN-DAG: v_mov_b32_e32 v[[ELT0:[0-9]+]], 1.0
; GCN-DAG: v_mov_b32_e32 v[[ELT1:[0-9]+]], 2.0
; GCN-DAG: v_mov_b32_e32 v[[ELT2:[0-9]+]], 0x40400000
; GCN-DAG: v_mov_b32_e32 v[[ELT3:[0-9]+]], 4.0
; GCN-DAG: v_mov_b32_e32 v[[INS:[0-9]+]], 0x40a00000

; MOVREL: v_movreld_b32_e32 v[[ELT1]], v[[INS]]
; MOVREL: buffer_store_dwordx4 v{{\[}}[[ELT0]]:[[ELT3]]{{\]}}
define amdgpu_kernel void @insert_w_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
  %0 = add i32 %in, 1
  %1 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %0
  store <4 x float> %1, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_wo_offset:
; GCN: s_load_dword [[IN:s[0-9]+]]

; MOVREL: s_mov_b32 m0, [[IN]]
; MOVREL: v_movreld_b32_e32 v[[ELT0:[0-9]+]]

; IDXMODE: s_set_gpr_idx_on [[IN]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v[[ELT0:[0-9]+]], v{{[0-9]+}}
; IDXMODE-NEXT: s_set_gpr_idx_off

; GCN: buffer_store_dwordx4 v{{\[}}[[ELT0]]:
define amdgpu_kernel void @insert_wo_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
  %0 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
  store <4 x float> %0, <4 x float> addrspace(1)* %out
  ret void
}
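
; Note the operand difference between the two directions: inserts enable the
; index mode with "dst" (the indexed register is written), while the extracts
; above use "src0" (the indexed register is read).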

; GCN-LABEL: {{^}}insert_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; MOVREL: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; MOVREL: v_movreld_b32_e32 v0, 5

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v0, 5
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; The vector indexed into is originally loaded into an SGPR rather
; than built with a reg_sequence

; GCN-LABEL: {{^}}insert_neg_offset_sgpr_loadreg:
; The offset depends on the register that holds the first element of the vector.
; MOVREL: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; MOVREL: v_movreld_b32_e32 v0, 5

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], dst
; IDXMODE-NEXT: v_mov_b32_e32 v0, 5
; IDXMODE-NEXT: s_set_gpr_idx_off
define amdgpu_kernel void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> %vec, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.

; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}

; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]:
; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]

; MOVREL: s_add_i32 m0, [[READLANE]], 0xfffffe00
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL: v_movreld_b32_e32 [[VEC_ELT0]], 5

; IDXMODE: s_addk_i32 [[ADD_IDX:s[0-9]+]], 0xfe00{{$}}
; IDXMODE: s_set_gpr_idx_idx [[ADD_IDX]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, 5

; GCN: s_cbranch_execnz [[LOOPBB]]
; GCN: s_mov_b64 exec, [[SAVEEXEC]]

; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword
define amdgpu_kernel void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_neg_inline_offset_vgpr:

; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
; GCN-DAG: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x1f4{{$}}

; IDXMODE: s_set_gpr_idx_on 0, dst

; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec

; The offset depends on the register that holds the first element of the vector.
; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]

; MOVREL: s_add_i32 m0, [[READLANE]], -16
; MOVREL: v_movreld_b32_e32 [[VEC_ELT0]], [[VAL]]

; IDXMODE: s_add_i32 [[ADD_IDX:s[0-9]+]], [[READLANE]], -16
; IDXMODE: s_set_gpr_idx_idx [[ADD_IDX]]
; IDXMODE: v_mov_b32_e32 [[VEC_ELT0]], [[VAL]]

; GCN: s_cbranch_execnz

; IDXMODE: s_set_gpr_idx_off
define amdgpu_kernel void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -16
  %value = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 500, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}
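
; Unlike the -512 cases, -16 fits in an inline constant, so both paths fold
; it straight into an s_add_i32 instead of needing the 0xfffffe00 literal (or
; the s_addk_i32 0xfe00 form). The inserted value 500 (0x1f4), however, is
; not an inline constant, so it is first materialized into [[VAL]].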

; When the block is split to insert the loop, make sure any other
; places that need to be expanded in the same block are also handled.

; GCN-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:

; FIXME: Why is the vector copied in between?

; GCN-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; GCN-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
; GCN-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]

; IDXMODE: s_set_gpr_idx_on 0, src0

; GCN: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec

; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]

; IDXMODE: s_set_gpr_idx_idx [[READLANE]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE: v_mov_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN-NEXT: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; GCN: s_mov_b64 exec, [[MASK]]
; IDXMODE: s_set_gpr_idx_off

; GCN: v_mov_b32_e32 [[VEC_ELT1_2:v[0-9]+]], [[S_ELT1]]

; IDXMODE: s_set_gpr_idx_on 0, src0
; GCN: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec

; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]

; IDXMODE: s_set_gpr_idx_idx [[READLANE]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE-NEXT: v_mov_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP1]]

; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[MOVREL0]]
; GCN: buffer_store_dword [[MOVREL1]]
define amdgpu_kernel void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %val0 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx0
  %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={SGPR4}" ()
  %val1 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx1
  store volatile i32 %val0, i32 addrspace(1)* %out0
  store volatile i32 %val1, i32 addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.reg, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}
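
; Both extracts above use the same divergent index, so the entry block is
; split around two back-to-back waterfall loops; note that each loop gets its
; own exec save/restore ([[MASK]] and [[MASK2]]).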

; GCN-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
; GCN-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
; GCN-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; GCN-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62

; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT3:[0-9]+]], s[[S_ELT3]]
; GCN: v_mov_b32_e32 v[[VEC_ELT2:[0-9]+]], s{{[0-9]+}}
; GCN: v_mov_b32_e32 v[[VEC_ELT1:[0-9]+]], s{{[0-9]+}}
; GCN: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]

; IDXMODE: s_set_gpr_idx_on 0, dst

; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]

; IDXMODE: s_set_gpr_idx_idx [[READLANE]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE-NEXT: v_mov_b32_e32 v[[VEC_ELT0]], [[INS0]]

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; GCN: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]
; IDXMODE: s_set_gpr_idx_off

; IDXMODE: s_set_gpr_idx_on 0, dst
; GCN: s_mov_b64 [[MASK]], exec

; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]

; MOVREL: s_mov_b32 m0, [[READLANE]]
; MOVREL: s_and_saveexec_b64 vcc, vcc
; MOVREL-NEXT: v_movreld_b32_e32 v[[VEC_ELT1]], 63

; IDXMODE: s_set_gpr_idx_idx [[READLANE]]
; IDXMODE: s_and_saveexec_b64 vcc, vcc
; IDXMODE-NEXT: v_mov_b32_e32 v[[VEC_ELT1]], 63

; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP1]]

; GCN: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:

; GCN: buffer_store_dword [[INS0]]
define amdgpu_kernel void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %live.out.val = call i32 asm sideeffect "v_mov_b32 $0, 62", "=v"()
  %vec1 = insertelement <4 x i32> %vec0, i32 %live.out.val, i32 %idx0
  %vec2 = insertelement <4 x i32> %vec1, i32 63, i32 %idx1
  store volatile <4 x i32> %vec2, <4 x i32> addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.val, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; GCN-LABEL: {{^}}extract_adjacent_blocks:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; GCN: buffer_load_dwordx4
; MOVREL: s_mov_b32 m0,
; MOVREL: v_movrels_b32_e32

; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, src0
; IDXMODE: v_mov_b32_e32
; IDXMODE: s_set_gpr_idx_off

; GCN: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN: [[BB4]]:
; GCN: buffer_load_dwordx4
; MOVREL: s_mov_b32 m0,
; MOVREL: v_movrels_b32_e32

; IDXMODE: s_set_gpr_idx_on
; IDXMODE: v_mov_b32_e32
; IDXMODE: s_set_gpr_idx_off

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = extractelement <4 x float> %tmp2, i32 undef
  call void asm sideeffect "; reg use $0", "v"(<4 x float> %tmp2) #0 ; Prevent block optimize out
  br label %bb7

bb4:
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = extractelement <4 x float> %tmp5, i32 undef
  call void asm sideeffect "; reg use $0", "v"(<4 x float> %tmp5) #0 ; Prevent block optimize out
  br label %bb7

bb7:
  %tmp8 = phi float [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile float %tmp8, float addrspace(1)* undef
  ret void
}

; GCN-LABEL: {{^}}insert_adjacent_blocks:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; GCN: buffer_load_dwordx4
; MOVREL: s_mov_b32 m0,
; MOVREL: v_movreld_b32_e32

; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, dst
; IDXMODE: v_mov_b32_e32
; IDXMODE: s_set_gpr_idx_off

; GCN: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN: [[BB4]]:
; GCN: buffer_load_dwordx4
; MOVREL: s_mov_b32 m0,
; MOVREL: v_movreld_b32_e32

; IDXMODE: s_set_gpr_idx_on s{{[0-9]+}}, dst
; IDXMODE: v_mov_b32_e32
; IDXMODE: s_set_gpr_idx_off

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1: ; preds = %bb
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = insertelement <4 x float> %tmp2, float %val0, i32 undef
  call void asm sideeffect "; reg use $0", "v"(<4 x float> %tmp3) #0 ; Prevent block optimize out
  br label %bb7

bb4: ; preds = %bb
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = insertelement <4 x float> %tmp5, float %val0, i32 undef
  call void asm sideeffect "; reg use $0", "v"(<4 x float> %tmp6) #0 ; Prevent block optimize out
  br label %bb7

bb7: ; preds = %bb4, %bb1
  %tmp8 = phi <4 x float> [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile <4 x float> %tmp8, <4 x float> addrspace(1)* undef
  ret void
}

; FIXME: Should be able to fold zero input to movreld to inline imm?

; GCN-LABEL: {{^}}multi_same_block:

; GCN-DAG: v_mov_b32_e32 v[[VEC0_ELT0:[0-9]+]], 0x41880000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41900000
; GCN-DAG: v_mov_b32_e32 v[[VEC0_ELT2:[0-9]+]], 0x41980000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a00000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a80000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b00000
; GCN-DAG: s_load_dword [[ARG:s[0-9]+]]
; IDXMODE-DAG: s_add_i32 [[ARG_ADD:s[0-9]+]], [[ARG]], -16

; MOVREL-DAG: s_add_i32 m0, [[ARG]], -16
; MOVREL: v_movreld_b32_e32 v[[VEC0_ELT0]], 4.0
; GCN-NOT: m0

; IDXMODE: s_set_gpr_idx_on [[ARG_ADD]], dst
; IDXMODE: v_mov_b32_e32 v[[VEC0_ELT0]], 4.0
; IDXMODE: s_set_gpr_idx_off

; GCN: v_mov_b32_e32 v[[VEC0_ELT2]], 0x4188cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4190cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4198cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a0cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a8cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b0cccd

; MOVREL: v_movreld_b32_e32 v[[VEC0_ELT2]], -4.0

; IDXMODE: s_set_gpr_idx_on [[ARG_ADD]], dst
; IDXMODE: v_mov_b32_e32 v[[VEC0_ELT2]], -4.0
; IDXMODE: s_set_gpr_idx_off

; GCN: s_mov_b32 m0, -1
; GCN: ds_write_b32
; GCN: ds_write_b32
; GCN: s_endpgm
define amdgpu_kernel void @multi_same_block(i32 %arg) #0 {
bb:
  %tmp1 = add i32 %arg, -16
  %tmp2 = insertelement <6 x float> <float 1.700000e+01, float 1.800000e+01, float 1.900000e+01, float 2.000000e+01, float 2.100000e+01, float 2.200000e+01>, float 4.000000e+00, i32 %tmp1
  %tmp3 = add i32 %arg, -16
  %tmp4 = insertelement <6 x float> <float 0x40311999A0000000, float 0x40321999A0000000, float 0x40331999A0000000, float 0x40341999A0000000, float 0x40351999A0000000, float 0x40361999A0000000>, float -4.0, i32 %tmp3
  %tmp5 = bitcast <6 x float> %tmp2 to <6 x i32>
  %tmp6 = extractelement <6 x i32> %tmp5, i32 1
  %tmp7 = bitcast <6 x float> %tmp4 to <6 x i32>
  %tmp8 = extractelement <6 x i32> %tmp7, i32 5
  store volatile i32 %tmp6, i32 addrspace(3)* undef, align 4
  store volatile i32 %tmp8, i32 addrspace(3)* undef, align 4
  ret void
}
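
; The inserted values 4.0 and -4.0 are inline constants, so they can appear
; directly as the v_movreld/v_mov source operand, whereas the vector elements
; themselves (e.g. 17.0 = 0x41880000) must be materialized from 32-bit
; literals.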

; The offset puts the index outside of the superregister boundaries, so
; clamp to the 1st element.
; GCN-LABEL: {{^}}extract_largest_inbounds_offset:
; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
; MOVREL: s_mov_b32 m0, [[IDX]]
; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]

; IDXMODE: s_set_gpr_idx_on [[IDX]], src0
; IDXMODE: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[EXTRACT]]
define amdgpu_kernel void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 3
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_out_of_bounds_offset:
; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
; MOVREL: s_add_i32 m0, [[IDX]], 4
; MOVREL: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]

; IDXMODE: s_add_i32 [[ADD_IDX:s[0-9]+]], [[IDX]], 4
; IDXMODE: s_set_gpr_idx_on [[ADD_IDX]], src0
; IDXMODE: v_mov_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
; IDXMODE: s_set_gpr_idx_off

; GCN: buffer_store_dword [[EXTRACT]]
define amdgpu_kernel void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 4
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
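
; In the in-bounds case the +3 folds into the base register choice
; (v[[HI_ELT]] with m0 = idx), while here idx + 4 can never be in bounds for
; a 4-element vector, so the base stays at the first element (v[[LO_ELT]])
; and the out-of-bounds read is simply undefined.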

; Test that the or is folded into the base address register instead of
; added to m0

; GCN-LABEL: {{^}}extractelement_v4i32_or_index:
; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; GCN-NOT: [[IDX_SHL]]

; MOVREL: s_mov_b32 m0, [[IDX_SHL]]
; MOVREL: v_movrels_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}

; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], src0
; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE: s_set_gpr_idx_off
define amdgpu_kernel void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %value = extractelement <4 x i32> %ld, i32 %idx
  store i32 %value, i32 addrspace(1)* %out
  ret void
}
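
; This works because the low two bits of %idx.shl are known zero, so
; "or %idx.shl, 1" is equivalent to "add %idx.shl, 1"; the +1 then folds into
; using element 1's register as the indexing base, leaving only the shifted
; value in m0 (or the gpr index).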

; GCN-LABEL: {{^}}insertelement_v4f32_or_index:
; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; GCN-NOT: [[IDX_SHL]]

; MOVREL: s_mov_b32 m0, [[IDX_SHL]]
; MOVREL: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}

; IDXMODE: s_set_gpr_idx_on [[IDX_SHL]], dst
; IDXMODE: v_mov_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
; IDXMODE: s_set_gpr_idx_off
define amdgpu_kernel void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %idx.in) nounwind {
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %idx
  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; GCN-LABEL: {{^}}broken_phi_bb:
; GCN: v_mov_b32_e32 [[PHIREG:v[0-9]+]], 8

; GCN: s_branch [[BB2:BB[0-9]+_[0-9]+]]

; GCN: {{^BB[0-9]+_[0-9]+}}:
; GCN: s_mov_b64 exec,
; IDXMODE: s_set_gpr_idx_off

; GCN: [[BB2]]:
; GCN: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, [[PHIREG]]
; GCN: buffer_load_dword

; GCN: [[REGLOOP:BB[0-9]+_[0-9]+]]:
; MOVREL: v_movreld_b32_e32

; IDXMODE: s_set_gpr_idx_idx
; IDXMODE: v_mov_b32_e32
; GCN: s_cbranch_execnz [[REGLOOP]]
define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
bb:
  br label %bb2

bb2: ; preds = %bb4, %bb
  %tmp = phi i32 [ 8, %bb ], [ %tmp7, %bb4 ]
  %tmp3 = icmp slt i32 %tmp, %arg
  br i1 %tmp3, label %bb4, label %bb8

bb4: ; preds = %bb2
  %vgpr = load volatile i32, i32 addrspace(1)* undef
  %tmp5 = insertelement <8 x i32> undef, i32 undef, i32 %vgpr
  %tmp6 = insertelement <8 x i32> %tmp5, i32 %arg1, i32 %vgpr
  %tmp7 = extractelement <8 x i32> %tmp6, i32 0
  br label %bb2

bb8: ; preds = %bb2
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1
declare void @llvm.amdgcn.s.barrier() #2

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }
attributes #2 = { nounwind convergent }