; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tahiti < %s | FileCheck -check-prefix=FAST64 -check-prefix=GCN %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=bonaire < %s | FileCheck -check-prefix=SLOW64 -check-prefix=GCN %s


; lshr (i64 x), c: c > 32 => reg_sequence lshr (i32 hi_32(x)), (c - 32), 0
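; Informal worked example (not a FileCheck directive): for lshr i64 x, 35 only
; bits [63:35] survive, so the low result dword is hi_32(x) >> 3 and the high
; result dword is 0, which is what the checks below expect.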
; GCN-LABEL: {{^}}lshr_i64_35:
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; GCN-DAG: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 3, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @lshr_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = lshr i64 %val, 35
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lshr_i64_63:
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; GCN-DAG: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 31, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @lshr_i64_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = lshr i64 %val, 63
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lshr_i64_33:
; GCN-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; GCN-DAG: v_lshrrev_b32_e32 v[[LO:[0-9]+]], 1, [[VAL]]
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @lshr_i64_33(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = lshr i64 %val, 33
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}lshr_i64_32:
; GCN-DAG: buffer_load_dword v[[LO:[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v[[HI:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @lshr_i64_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = lshr i64 %val, 32
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; Make sure the 'and' with the constant doesn't prevent a bfe from forming
; after the 64-bit shift is split.
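; Informal arithmetic (not a FileCheck directive): the mask clears bit 63, so
; lshr by 40 extracts bits [62:40], i.e. 23 bits starting at bit 8 of the high
; dword, which is why a v_bfe_u32 with offset 8 and width 23 plus a zero high
; result dword is expected below.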

; GCN-LABEL: {{^}}lshr_and_i64_35:
; GCN: buffer_load_dwordx2 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN: v_bfe_u32 v[[BFE:[0-9]+]], v[[HI]], 8, 23
; GCN: v_mov_b32_e32 v[[ZERO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[BFE]]:[[ZERO]]{{\]}}
define amdgpu_kernel void @lshr_and_i64_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %and = and i64 %val, 9223372036854775807 ; 0x7fffffffffffffff
  %shl = lshr i64 %and, 40
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

69; lshl (i64 x), c: c > 32 => reg_sequence lshl 0, (i32 lo_32(x)), (c - 32)

; GCN-LABEL: {{^}}shl_i64_const_35:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 v[[HI:[0-9]+]], 3, [[VAL]]
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @shl_i64_const_35(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 35
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i64_const_32:
; GCN-DAG: buffer_load_dword v[[HI:[0-9]+]]
; GCN-DAG: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @shl_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 32
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i64_const_63:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 v[[HI:[0-9]+]], 31, [[VAL]]
; GCN: v_mov_b32_e32 v[[LO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[LO]]:[[HI]]{{\]}}
define amdgpu_kernel void @shl_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 63
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; ashr (i64 x), 63 => reg_sequence (ashr (i32 hi_32(x)), 31), (ashr (i32 hi_32(x)), 31)
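; Informally (not a FileCheck directive): ashr by 32 produces hi_32(x) in the
; low result dword and (ashr (i32 hi_32(x)), 31) in the high dword; ashr by 63
; replicates the sign bit into both dwords.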

; GCN-LABEL: {{^}}ashr_i64_const_32:
define amdgpu_kernel void @ashr_i64_const_32(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = ashr i64 %val, 32
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}ashr_i64_const_63:
define amdgpu_kernel void @ashr_i64_const_63(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = ashr i64 %val, 63
  store i64 %shl, i64 addrspace(1)* %out
  ret void
}

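; The trunc(shl) tests below expect the shift to be narrowed to the truncated
; width: with a constant amount below 32 the i64 shl plus trunc folds to a
; single 32-bit shift of the low dword, with an amount of 32 or more the
; truncated result is known to be 0, and when the wide shift result has other
; uses it is kept as a 64-bit shift (see the multi_use test). Informal summary,
; not a FileCheck directive.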
; GCN-LABEL: {{^}}trunc_shl_31_i32_i64:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 31, [[VAL]]
; GCN: buffer_store_dword [[SHL]]
define amdgpu_kernel void @trunc_shl_31_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 31
  %trunc = trunc i64 %shl to i32
  store i32 %trunc, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_15_i16_i64:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 15, [[VAL]]
; GCN: buffer_store_short [[SHL]]
define amdgpu_kernel void @trunc_shl_15_i16_i64(i16 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 15
  %trunc = trunc i64 %shl to i16
  store i16 %trunc, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_15_i16_i32:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 15, [[VAL]]
; GCN: buffer_store_short [[SHL]]
define amdgpu_kernel void @trunc_shl_15_i16_i32(i16 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32, i32 addrspace(1)* %in
  %shl = shl i32 %val, 15
  %trunc = trunc i32 %shl to i16
  store i16 %trunc, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_7_i8_i64:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 7, [[VAL]]
; GCN: buffer_store_byte [[SHL]]
define amdgpu_kernel void @trunc_shl_7_i8_i64(i8 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 7
  %trunc = trunc i64 %shl to i8
  store i8 %trunc, i8 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_1_i2_i64:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 1, [[VAL]]
; GCN: v_and_b32_e32 [[AND:v[0-9]+]], 2, [[SHL]]
; GCN: buffer_store_byte [[AND]]
define amdgpu_kernel void @trunc_shl_1_i2_i64(i2 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 1
  %trunc = trunc i64 %shl to i2
  store i2 %trunc, i2 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_1_i32_i64:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 1, [[VAL]]
; GCN: buffer_store_dword [[SHL]]
define amdgpu_kernel void @trunc_shl_1_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 1
  %trunc = trunc i64 %shl to i32
  store i32 %trunc, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_16_i32_i64:
; GCN: buffer_load_dword [[VAL:v[0-9]+]]
; GCN: v_lshlrev_b32_e32 [[SHL:v[0-9]+]], 16, [[VAL]]
; GCN: buffer_store_dword [[SHL]]
define amdgpu_kernel void @trunc_shl_16_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 16
  %trunc = trunc i64 %shl to i32
  store i32 %trunc, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_33_i32_i64:
; GCN: v_mov_b32_e32 [[ZERO:v[0-9]+]], 0{{$}}
; GCN: buffer_store_dword [[ZERO]]
define amdgpu_kernel void @trunc_shl_33_i32_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 33
  %trunc = trunc i64 %shl to i32
  store i32 %trunc, i32 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_16_v2i32_v2i64:
; GCN: buffer_load_dwordx4 v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]{{\]}}
; GCN-DAG: v_lshlrev_b32_e32 v[[RESHI:[0-9]+]], 16, v{{[0-9]+}}
; GCN-DAG: v_lshlrev_b32_e32 v[[RESLO:[0-9]+]], 16, v[[LO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]{{\]}}
define amdgpu_kernel void @trunc_shl_16_v2i32_v2i64(<2 x i32> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
  %val = load <2 x i64>, <2 x i64> addrspace(1)* %in
  %shl = shl <2 x i64> %val, <i64 16, i64 16>
  %trunc = trunc <2 x i64> %shl to <2 x i32>
  store <2 x i32> %trunc, <2 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_31_i32_i64_multi_use:
; GCN: buffer_load_dwordx2 [[VAL:v\[[0-9]+:[0-9]+\]]]
; GCN: v_lshl_b64 v{{\[}}[[RESLO:[0-9]+]]:[[RESHI:[0-9]+]]{{\]}}, [[VAL]], 31
; GCN: buffer_store_dword v[[RESLO]]
; GCN: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]{{\]}}
define amdgpu_kernel void @trunc_shl_31_i32_i64_multi_use(i32 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %val = load i64, i64 addrspace(1)* %in
  %shl = shl i64 %val, 31
  %trunc = trunc i64 %shl to i32
  store volatile i32 %trunc, i32 addrspace(1)* %out
  store volatile i64 %shl, i64 addrspace(1)* %in
  ret void
}

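; trunc_shl_and31 and trunc_shl_and30 use a variable shift amount: the mask with
; 31 or 30 proves the amount is below 32, so the same 32-bit narrowing applies;
; trunc_shl_wrong_and63 and trunc_shl_no_and cannot be narrowed and must keep a
; 64-bit shift. Informal summary, not a FileCheck directive.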
; GCN-LABEL: {{^}}trunc_shl_and31:
; GCN: s_and_b32 s[[AMT:[0-9]+]], s{{[0-9]+}}, 31
; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, s[[AMT]], v{{[0-9]+}}
; GCN-NOT: v_lshl_b64
; GCN-NOT: v_lshlrev_b64
define amdgpu_kernel void @trunc_shl_and31(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 31
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = shl i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_and30:
; GCN: s_and_b32 s[[AMT:[0-9]+]], s{{[0-9]+}}, 30
; GCN: v_lshlrev_b32_e32 v{{[0-9]+}}, s[[AMT]], v{{[0-9]+}}
; GCN-NOT: v_lshl_b64
; GCN-NOT: v_lshlrev_b64
define amdgpu_kernel void @trunc_shl_and30(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 30
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = shl i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_wrong_and63:
; Negative test, wrong constant
; GCN: v_lshl_b64
define amdgpu_kernel void @trunc_shl_wrong_and63(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp3 = and i32 %arg2, 63
  %tmp4 = zext i32 %tmp3 to i64
  %tmp5 = shl i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_no_and:
; Negative test, shift can be full 64 bit
; GCN: v_lshl_b64
define amdgpu_kernel void @trunc_shl_no_and(i64 addrspace(1)* nocapture readonly %arg, i32 addrspace(1)* nocapture %arg1, i32 %arg2) {
bb:
  %tmp = load i64, i64 addrspace(1)* %arg, align 8
  %tmp4 = zext i32 %arg2 to i64
  %tmp5 = shl i64 %tmp, %tmp4
  %tmp6 = trunc i64 %tmp5 to i32
  store i32 %tmp6, i32 addrspace(1)* %arg1, align 4
  ret void
}

; GCN-LABEL: {{^}}trunc_shl_vec_vec:
; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 3, v{{[0-9]+}}
; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 4, v{{[0-9]+}}
; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 5, v{{[0-9]+}}
; GCN-DAG: v_lshlrev_b32_e32 v{{[0-9]+}}, 6, v{{[0-9]+}}
; GCN-NOT: v_lshl_b64
; GCN-NOT: v_lshlrev_b64
define amdgpu_kernel void @trunc_shl_vec_vec(<4 x i64> addrspace(1)* %arg) {
bb:
  %v = load <4 x i64>, <4 x i64> addrspace(1)* %arg, align 32
  %shl = shl <4 x i64> %v, <i64 3, i64 4, i64 5, i64 6>
  store <4 x i64> %shl, <4 x i64> addrspace(1)* %arg, align 32
  ret void
}