; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s

; Tests for indirect addressing on SI, which is implemented using dynamic
; indexing of vectors.

; Extract at a dynamic index with a constant +1 offset; the offset is
; folded by selecting element 2.0 as the movrels base register.
; GCN-LABEL: {{^}}extract_w_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 2.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 1.0
; GCN-DAG: s_mov_b32 m0, [[IN]]
; GCN: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
define void @extract_w_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %idx = add i32 %in, 1
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %idx
  store float %elt, float addrspace(1)* %out
  ret void
}

; XXX: Could do v_or_b32 directly
; GCN-LABEL: {{^}}extract_w_offset_salu_use_vector:
; GCN: s_mov_b32 m0
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: s_or_b32
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}
; GCN: v_movrels_b32_e32
define void @extract_w_offset_salu_use_vector(i32 addrspace(1)* %out, i32 %in, <4 x i32> %or.val) {
entry:
  %idx = add i32 %in, 1
  %vec = or <4 x i32> %or.val, <i32 1, i32 2, i32 3, i32 4>
  %elt = extractelement <4 x i32> %vec, i32 %idx
  store i32 %elt, i32 addrspace(1)* %out
  ret void
}

; Extract at a dynamic index with no added offset; element 1.0 is the
; movrels base register.
; GCN-LABEL: {{^}}extract_wo_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 4.0
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x40400000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 2.0
; GCN-DAG: v_mov_b32_e32 [[BASEREG:v[0-9]+]], 1.0
; GCN-DAG: s_mov_b32 m0, [[IN]]
; GCN: v_movrels_b32_e32 v{{[0-9]+}}, [[BASEREG]]
define void @extract_wo_offset(float addrspace(1)* %out, i32 %in) {
entry:
  %elt = extractelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, i32 %in
  store float %elt, float addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}extract_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; GCN: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; GCN: v_movrels_b32_e32 v{{[0-9]}}, v0
define void @extract_neg_offset_sgpr(i32 addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Same as extract_neg_offset_sgpr but the vector is the result of an or,
; not a constant.
; GCN-LABEL: {{^}}extract_neg_offset_sgpr_loaded:
; The offset depends on the register that holds the first element of the vector.
; GCN: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; GCN: v_movrels_b32_e32 v{{[0-9]}}, v0
define void @extract_neg_offset_sgpr_loaded(i32 addrspace(1)* %out, <4 x i32> %vec0, <4 x i32> %vec1, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %or = or <4 x i32> %vec0, %vec1
  %value = extractelement <4 x i32> %or, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Dynamic index is in a VGPR, so a waterfall loop (readfirstlane +
; exec-mask loop) is required to move it into m0.
; GCN-LABEL: {{^}}extract_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.

; FIXME: The waitcnt for the argument load can go after the loop
; GCN: s_mov_b64 s{{\[[0-9]+:[0-9]+\]}}, exec
; GCN: s_waitcnt lgkmcnt(0)

; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]], v{{[0-9]+}}
; GCN: s_add_i32 m0, [[READLANE]], 0xfffffe0
; GCN: v_movrels_b32_e32 [[RESULT:v[0-9]+]], v1
; GCN: s_cbranch_execnz

; GCN: buffer_store_dword [[RESULT]]
define void @extract_neg_offset_vgpr(i32 addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = extractelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 %index
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Index is undef; only checks that this compiles without crashing.
; GCN-LABEL: {{^}}extract_undef_offset_sgpr:
define void @extract_undef_offset_sgpr(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = extractelement <4 x i32> %ld, i32 undef
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_undef_offset_sgpr_vector_src:
; GCN-DAG: buffer_load_dwordx4
; GCN-DAG: s_mov_b32 m0,
; GCN: v_movreld_b32
define void @insert_undef_offset_sgpr_vector_src(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
entry:
  %ld = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %value = insertelement <4 x i32> %ld, i32 5, i32 undef
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; Insert 5.0 (0x40a00000) at a dynamic index with a constant +1 offset;
; the offset is folded by using element 1 (2.0) as the movreld base.
; GCN-LABEL: {{^}}insert_w_offset:
; GCN-DAG: s_load_dword [[IN:s[0-9]+]]
; GCN-DAG: s_mov_b32 m0, [[IN]]
; GCN-DAG: v_mov_b32_e32 v[[ELT0:[0-9]+]], 1.0
; GCN-DAG: v_mov_b32_e32 v[[ELT1:[0-9]+]], 2.0
; GCN-DAG: v_mov_b32_e32 v[[ELT2:[0-9]+]], 0x40400000
; GCN-DAG: v_mov_b32_e32 v[[ELT3:[0-9]+]], 4.0
; GCN-DAG: v_mov_b32_e32 v[[INS:[0-9]+]], 0x40a00000
; GCN: v_movreld_b32_e32 v[[ELT1]], v[[INS]]
; GCN: buffer_store_dwordx4 v{{\[}}[[ELT0]]:[[ELT3]]{{\]}}
define void @insert_w_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
  %0 = add i32 %in, 1
  %1 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %0
  store <4 x float> %1, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_wo_offset:
; GCN: s_load_dword [[IN:s[0-9]+]]
; GCN: s_mov_b32 m0, [[IN]]
; GCN: v_movreld_b32_e32 v[[ELT0:[0-9]+]]
; GCN: buffer_store_dwordx4 v{{\[}}[[ELT0]]:
define void @insert_wo_offset(<4 x float> addrspace(1)* %out, i32 %in) {
entry:
  %0 = insertelement <4 x float> <float 1.0, float 2.0, float 3.0, float 4.0>, float 5.0, i32 %in
  store <4 x float> %0, <4 x float> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_neg_offset_sgpr:
; The offset depends on the register that holds the first element of the vector.
; GCN: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; GCN: v_movreld_b32_e32 v0, 5
define void @insert_neg_offset_sgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> <i32 0, i32 1, i32 2, i32 3>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; The vector indexed into is originally loaded into an SGPR rather
; than built with a reg_sequence

; GCN-LABEL: {{^}}insert_neg_offset_sgpr_loadreg:
; The offset depends on the register that holds the first element of the vector.
; GCN: s_add_i32 m0, s{{[0-9]+}}, 0xfffffe{{[0-9a-z]+}}
; GCN: v_movreld_b32_e32 v0, 5
define void @insert_neg_offset_sgpr_loadreg(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out, <4 x i32> %vec, i32 %offset) {
entry:
  %index = add i32 %offset, -512
  %value = insertelement <4 x i32> %vec, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; VGPR index requires a waterfall loop around the movreld.
; GCN-LABEL: {{^}}insert_neg_offset_vgpr:
; The offset depends on the register that holds the first element of the vector.

; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}

; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; GCN: s_waitcnt lgkmcnt(0)

; GCN: [[LOOPBB:BB[0-9]+_[0-9]+]]:
; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
; GCN: s_add_i32 m0, [[READLANE]], 0xfffffe00
; GCN: v_movreld_b32_e32 [[VEC_ELT0]], 5
; GCN: s_cbranch_execnz [[LOOPBB]]

; GCN: s_mov_b64 exec, [[SAVEEXEC]]
; GCN: buffer_store_dword
define void @insert_neg_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -512
  %value = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 5, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}insert_neg_inline_offset_vgpr:

; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], 1{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], 2{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT2:v[0-9]+]], 3{{$}}
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT3:v[0-9]+]], 4{{$}}
; GCN-DAG: v_mov_b32_e32 [[VAL:v[0-9]+]], 0x1f4{{$}}

; GCN: s_mov_b64 [[SAVEEXEC:s\[[0-9]+:[0-9]+\]]], exec
; GCN: s_waitcnt lgkmcnt(0)

; The offset depends on the register that holds the first element of the vector.
; GCN: v_readfirstlane_b32 [[READLANE:s[0-9]+]]
; GCN: s_add_i32 m0, [[READLANE]], -16
; GCN: v_movreld_b32_e32 [[VEC_ELT0]], [[VAL]]
; GCN: s_cbranch_execnz
define void @insert_neg_inline_offset_vgpr(i32 addrspace(1)* %in, <4 x i32> addrspace(1)* %out) {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %index = add i32 %id, -16
  %value = insertelement <4 x i32> <i32 1, i32 2, i32 3, i32 4>, i32 500, i32 %index
  store <4 x i32> %value, <4 x i32> addrspace(1)* %out
  ret void
}

; When the block is split to insert the loop, make sure any other
; places that need to be expanded in the same block are also handled.

; GCN-LABEL: {{^}}extract_vgpr_offset_multiple_in_block:

; FIXME: Why is vector copied in between?

; GCN-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; GCN-DAG: s_mov_b32 [[S_ELT1:s[0-9]+]], 9
; GCN-DAG: s_mov_b32 [[S_ELT0:s[0-9]+]], 7
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT0:v[0-9]+]], [[S_ELT0]]
; GCN-DAG: v_mov_b32_e32 [[VEC_ELT1:v[0-9]+]], [[S_ELT1]]

; GCN: s_mov_b64 [[MASK:s\[[0-9]+:[0-9]+\]]], exec
; GCN: s_waitcnt vmcnt(0)

; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; GCN: s_mov_b32 m0, [[READLANE]]
; GCN: s_and_saveexec_b64 vcc, vcc
; GCN: v_movrels_b32_e32 [[MOVREL0:v[0-9]+]], [[VEC_ELT0]]
; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN-NEXT: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; GCN: s_mov_b64 exec, [[MASK]]
; GCN: v_mov_b32_e32 [[VEC_ELT1_2:v[0-9]+]], [[S_ELT1]]
; GCN: s_mov_b64 [[MASK2:s\[[0-9]+:[0-9]+\]]], exec

; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; GCN: s_mov_b32 m0, [[READLANE]]
; GCN: s_and_saveexec_b64 vcc, vcc
; GCN-NEXT: v_movrels_b32_e32 [[MOVREL1:v[0-9]+]], [[VEC_ELT1_2]]
; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP1]]

; GCN: buffer_store_dword [[MOVREL0]]
; GCN: buffer_store_dword [[MOVREL1]]
define void @extract_vgpr_offset_multiple_in_block(i32 addrspace(1)* %out0, i32 addrspace(1)* %out1, i32 addrspace(1)* %in) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %val0 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx0
  %live.out.reg = call i32 asm sideeffect "s_mov_b32 $0, 17", "={SGPR4}" ()
  %val1 = extractelement <4 x i32> <i32 7, i32 9, i32 11, i32 13>, i32 %idx1
  store volatile i32 %val0, i32 addrspace(1)* %out0
  store volatile i32 %val1, i32 addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.reg, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; Two dynamic inserts with VGPR indices in one block; both get waterfall
; loops, and the inline-asm result must stay live across them.
; GCN-LABEL: {{^}}insert_vgpr_offset_multiple_in_block:
; GCN-DAG: s_load_dwordx4 s{{\[}}[[S_ELT0:[0-9]+]]:[[S_ELT3:[0-9]+]]{{\]}}
; GCN-DAG: {{buffer|flat}}_load_dword [[IDX0:v[0-9]+]]
; GCN-DAG: v_mov_b32 [[INS0:v[0-9]+]], 62

; GCN-DAG: v_mov_b32_e32 v[[VEC_ELT3:[0-9]+]], s[[S_ELT3]]
; GCN: v_mov_b32_e32 v[[VEC_ELT2:[0-9]+]], s{{[0-9]+}}
; GCN: v_mov_b32_e32 v[[VEC_ELT1:[0-9]+]], s{{[0-9]+}}
; GCN: v_mov_b32_e32 v[[VEC_ELT0:[0-9]+]], s[[S_ELT0]]

; GCN: [[LOOP0:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; GCN: s_mov_b32 m0, [[READLANE]]
; GCN: s_and_saveexec_b64 vcc, vcc
; GCN-NEXT: v_movreld_b32_e32 v[[VEC_ELT0]], [[INS0]]
; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP0]]

; FIXME: Redundant copy
; GCN: s_mov_b64 exec, [[MASK:s\[[0-9]+:[0-9]+\]]]
; GCN: s_mov_b64 [[MASK]], exec

; GCN: [[LOOP1:BB[0-9]+_[0-9]+]]:
; GCN-NEXT: v_readfirstlane_b32 [[READLANE:s[0-9]+]], [[IDX0]]
; GCN: v_cmp_eq_u32_e32 vcc, [[READLANE]], [[IDX0]]
; GCN: s_mov_b32 m0, [[READLANE]]
; GCN: s_and_saveexec_b64 vcc, vcc
; GCN-NEXT: v_movreld_b32_e32 v[[VEC_ELT1]], 63
; GCN-NEXT: s_xor_b64 exec, exec, vcc
; GCN: s_cbranch_execnz [[LOOP1]]

; GCN: buffer_store_dwordx4 v{{\[}}[[VEC_ELT0]]:

; GCN: buffer_store_dword [[INS0]]
define void @insert_vgpr_offset_multiple_in_block(<4 x i32> addrspace(1)* %out0, <4 x i32> addrspace(1)* %out1, i32 addrspace(1)* %in, <4 x i32> %vec0) #0 {
entry:
  %id = call i32 @llvm.amdgcn.workitem.id.x() #1
  %id.ext = zext i32 %id to i64
  %gep = getelementptr inbounds i32, i32 addrspace(1)* %in, i64 %id.ext
  %idx0 = load volatile i32, i32 addrspace(1)* %gep
  %idx1 = add i32 %idx0, 1
  %live.out.val = call i32 asm sideeffect "v_mov_b32 $0, 62", "=v"()
  %vec1 = insertelement <4 x i32> %vec0, i32 %live.out.val, i32 %idx0
  %vec2 = insertelement <4 x i32> %vec1, i32 63, i32 %idx1
  store volatile <4 x i32> %vec2, <4 x i32> addrspace(1)* %out0
  %cmp = icmp eq i32 %id, 0
  br i1 %cmp, label %bb1, label %bb2

bb1:
  store volatile i32 %live.out.val, i32 addrspace(1)* undef
  br label %bb2

bb2:
  ret void
}

; Dynamic extracts in two adjacent blocks joined by a phi.
; GCN-LABEL: {{^}}extract_adjacent_blocks:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; GCN: buffer_load_dwordx4
; GCN: s_mov_b32 m0,
; GCN: v_movrels_b32_e32
; GCN: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN: [[BB4]]:
; GCN: buffer_load_dwordx4
; GCN: s_mov_b32 m0,
; GCN: v_movrels_b32_e32

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
define void @extract_adjacent_blocks(i32 %arg) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = extractelement <4 x float> %tmp2, i32 undef
  br label %bb7

bb4:
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = extractelement <4 x float> %tmp5, i32 undef
  br label %bb7

bb7:
  %tmp8 = phi float [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile float %tmp8, float addrspace(1)* undef
  ret void
}

; Dynamic inserts in two adjacent blocks joined by a phi.
; GCN-LABEL: {{^}}insert_adjacent_blocks:
; GCN: s_load_dword [[ARG:s[0-9]+]]
; GCN: s_cmp_lg_u32
; GCN: s_cbranch_scc0 [[BB4:BB[0-9]+_[0-9]+]]

; GCN: buffer_load_dwordx4
; GCN: s_mov_b32 m0,
; GCN: v_movreld_b32_e32
; GCN: s_branch [[ENDBB:BB[0-9]+_[0-9]+]]

; GCN: [[BB4]]:
; GCN: buffer_load_dwordx4
; GCN: s_mov_b32 m0,
; GCN: v_movreld_b32_e32

; GCN: [[ENDBB]]:
; GCN: buffer_store_dword
; GCN: s_endpgm
define void @insert_adjacent_blocks(i32 %arg, float %val0) #0 {
bb:
  %tmp = icmp eq i32 %arg, 0
  br i1 %tmp, label %bb1, label %bb4

bb1:                                              ; preds = %bb
  %tmp2 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp3 = insertelement <4 x float> %tmp2, float %val0, i32 undef
  br label %bb7

bb4:                                              ; preds = %bb
  %tmp5 = load volatile <4 x float>, <4 x float> addrspace(1)* undef
  %tmp6 = insertelement <4 x float> %tmp5, float %val0, i32 undef
  br label %bb7

bb7:                                              ; preds = %bb4, %bb1
  %tmp8 = phi <4 x float> [ %tmp3, %bb1 ], [ %tmp6, %bb4 ]
  store volatile <4 x float> %tmp8, <4 x float> addrspace(1)* undef
  ret void
}

; FIXME: Should be able to fold zero input to movreld to inline imm?

; Two independent dynamic inserts into <6 x float> vectors in the same block.
; GCN-LABEL: {{^}}multi_same_block:

; GCN-DAG: v_mov_b32_e32 v[[VEC0_ELT0:[0-9]+]], 0x41880000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41900000
; GCN-DAG: v_mov_b32_e32 v[[VEC0_ELT2:[0-9]+]], 0x41980000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a00000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a80000
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b00000
; GCN-DAG: s_load_dword [[ARG:s[0-9]+]]

; GCN-DAG: s_add_i32 m0, [[ARG]], -16
; GCN: v_movreld_b32_e32 v[[VEC0_ELT0]], 4.0
; GCN-NOT: m0

; GCN: v_mov_b32_e32 v[[VEC0_ELT2]], 0x4188cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4190cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x4198cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a0cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41a8cccd
; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, 0x41b0cccd
; GCN: v_movreld_b32_e32 v[[VEC0_ELT2]], -4.0

; GCN: s_mov_b32 m0, -1
; GCN: ds_write_b32
; GCN: ds_write_b32
; GCN: s_endpgm
define void @multi_same_block(i32 %arg) #0 {
bb:
  %tmp1 = add i32 %arg, -16
  %tmp2 = insertelement <6 x float> <float 1.700000e+01, float 1.800000e+01, float 1.900000e+01, float 2.000000e+01, float 2.100000e+01, float 2.200000e+01>, float 4.000000e+00, i32 %tmp1
  %tmp3 = add i32 %arg, -16
  %tmp4 = insertelement <6 x float> <float 0x40311999A0000000, float 0x40321999A0000000, float 0x40331999A0000000, float 0x40341999A0000000, float 0x40351999A0000000, float 0x40361999A0000000>, float -4.0, i32 %tmp3
  %tmp5 = bitcast <6 x float> %tmp2 to <6 x i32>
  %tmp6 = extractelement <6 x i32> %tmp5, i32 1
  %tmp7 = bitcast <6 x float> %tmp4 to <6 x i32>
  %tmp8 = extractelement <6 x i32> %tmp7, i32 5
  store volatile i32 %tmp6, i32 addrspace(3)* undef, align 4
  store volatile i32 %tmp8, i32 addrspace(3)* undef, align 4
  ret void
}

; offset puts outside of superregister boundaries, so clamp to 1st element.
; GCN-LABEL: {{^}}extract_largest_inbounds_offset:
; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
; GCN: s_mov_b32 m0, [[IDX]]
; GCN: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[HI_ELT]]
; GCN: buffer_store_dword [[EXTRACT]]
define void @extract_largest_inbounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 3
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Constant offset of +4 is past the end of the <4 x i32>, so it is added
; to m0 rather than folded into the base register choice.
; GCN-LABEL: {{^}}extract_out_of_bounds_offset:
; GCN-DAG: buffer_load_dwordx4 v{{\[}}[[LO_ELT:[0-9]+]]:[[HI_ELT:[0-9]+]]{{\]}}
; GCN-DAG: s_load_dword [[IDX:s[0-9]+]]
; GCN: s_add_i32 m0, [[IDX]], 4
; GCN: v_movrels_b32_e32 [[EXTRACT:v[0-9]+]], v[[LO_ELT]]
; GCN: buffer_store_dword [[EXTRACT]]
define void @extract_out_of_bounds_offset(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %offset = add i32 %idx, 4
  %value = extractelement <4 x i32> %ld, i32 %offset
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Test that the or is folded into the base address register instead of
; added to m0

; GCN-LABEL: {{^}}extractelement_v4i32_or_index:
; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; GCN-NOT: [[IDX_SHL]]
; GCN: s_mov_b32 m0, [[IDX_SHL]]
; GCN: v_movrels_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
define void @extractelement_v4i32_or_index(i32 addrspace(1)* %out, <4 x i32> addrspace(1)* %in, i32 %idx.in) {
entry:
  %ld = load volatile <4 x i32>, <4 x i32> addrspace(1)* %in
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %value = extractelement <4 x i32> %ld, i32 %idx
  store i32 %value, i32 addrspace(1)* %out
  ret void
}

; Same or-folding check as above, for the insert path.
; GCN-LABEL: {{^}}insertelement_v4f32_or_index:
; GCN: s_load_dword [[IDX_IN:s[0-9]+]]
; GCN: s_lshl_b32 [[IDX_SHL:s[0-9]+]], [[IDX_IN]]
; GCN-NOT: [[IDX_SHL]]
; GCN: s_mov_b32 m0, [[IDX_SHL]]
; GCN: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}
define void @insertelement_v4f32_or_index(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %idx.in) nounwind {
  %idx.shl = shl i32 %idx.in, 2
  %idx = or i32 %idx.shl, 1
  %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %idx
  store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16
  ret void
}

; Dynamic insert inside a loop whose trip value flows through a phi.
; GCN-LABEL: {{^}}broken_phi_bb:
; GCN: v_mov_b32_e32 [[PHIREG:v[0-9]+]], 8

; GCN: s_branch [[BB2:BB[0-9]+_[0-9]+]]

; GCN: {{^BB[0-9]+_[0-9]+}}:
; GCN: s_mov_b64 exec,

; GCN: [[BB2]]:
; GCN: v_cmp_le_i32_e32 vcc, s{{[0-9]+}}, [[PHIREG]]
; GCN: buffer_load_dword

; GCN: [[REGLOOP:BB[0-9]+_[0-9]+]]:
; GCN: v_movreld_b32_e32
; GCN: s_cbranch_execnz [[REGLOOP]]
define void @broken_phi_bb(i32 %arg, i32 %arg1) #0 {
bb:
  br label %bb2

bb2:                                              ; preds = %bb4, %bb
  %tmp = phi i32 [ 8, %bb ], [ %tmp7, %bb4 ]
  %tmp3 = icmp slt i32 %tmp, %arg
  br i1 %tmp3, label %bb4, label %bb8

bb4:                                              ; preds = %bb2
  %vgpr = load volatile i32, i32 addrspace(1)* undef
  %tmp5 = insertelement <8 x i32> undef, i32 undef, i32 %vgpr
  %tmp6 = insertelement <8 x i32> %tmp5, i32 %arg1, i32 %vgpr
  %tmp7 = extractelement <8 x i32> %tmp6, i32 0
  br label %bb2

bb8:                                              ; preds = %bb2
  ret void
}

declare i32 @llvm.amdgcn.workitem.id.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone }