; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,VI,FUNC %s
; RUN: llc -amdgpu-scalarize-global-loads=false -march=r600 -mcpu=redwood < %s | FileCheck -check-prefixes=EG,FUNC %s

; mul24 and mad24 are affected

; FUNC-LABEL: {{^}}test_mul_v2i32:
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @test_mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1) * %in
  %b = load <2 x i32>, <2 x i32> addrspace(1) * %b_ptr
  %result = mul <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul_v4i32:
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
; EG: MULLO_INT {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
; GCN: v_mul_lo_i32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @v_mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1) * %in
  %b = load <4 x i32>, <4 x i32> addrspace(1) * %b_ptr
  %result = mul <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}s_trunc_i64_mul_to_i32:
; GCN: s_load_dword
; GCN: s_load_dword
; GCN: s_mul_i32
; GCN: buffer_store_dword
define amdgpu_kernel void @s_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 %a, i64 %b) {
  %mul = mul i64 %b, %a
  %trunc = trunc i64 %mul to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_trunc_i64_mul_to_i32:
; GCN: s_load_dword
; GCN: s_load_dword
; GCN: v_mul_lo_i32
; GCN: buffer_store_dword
define amdgpu_kernel void @v_trunc_i64_mul_to_i32(i32 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) nounwind {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %mul = mul i64 %b, %a
  %trunc = trunc i64 %mul to i32
  store i32 %trunc, i32 addrspace(1)* %out, align 8
  ret void
}

; This 64-bit multiply should just use MUL_HI and MUL_LO, since the top
; 32-bits of both arguments are sign bits.
; FUNC-LABEL: {{^}}mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; GCN-DAG: s_mul_i32
; GCN-DAG: v_mul_hi_i32
define amdgpu_kernel void @mul64_sext_c(i64 addrspace(1)* %out, i32 %in) {
entry:
  %0 = sext i32 %in to i64
  %1 = mul i64 %0, 80
  store i64 %1, i64 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul64_sext_c:
; EG-DAG: MULLO_INT
; EG-DAG: MULHI_INT
; GCN-DAG: v_mul_lo_i32
; GCN-DAG: v_mul_hi_i32
; GCN: s_endpgm
define amdgpu_kernel void @v_mul64_sext_c(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32, i32 addrspace(1)* %in, align 4
  %ext = sext i32 %val to i64
  %mul = mul i64 %ext, 80
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_mul64_sext_inline_imm:
; GCN-DAG: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; GCN-DAG: v_mul_hi_i32 v{{[0-9]+}}, v{{[0-9]+}}, 9
; GCN: s_endpgm
define amdgpu_kernel void @v_mul64_sext_inline_imm(i64 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %val = load i32, i32 addrspace(1)* %in, align 4
  %ext = sext i32 %val to i64
  %mul = mul i64 %ext, 9
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_mul_i32:
; GCN: s_load_dword [[SRC0:s[0-9]+]],
; GCN: s_load_dword [[SRC1:s[0-9]+]],
; GCN: s_mul_i32 [[SRESULT:s[0-9]+]], [[SRC0]], [[SRC1]]
; GCN: v_mov_b32_e32 [[VRESULT:v[0-9]+]], [[SRESULT]]
; GCN: buffer_store_dword [[VRESULT]],
; GCN: s_endpgm
define amdgpu_kernel void @s_mul_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) nounwind {
  %mul = mul i32 %a, %b
  store i32 %mul, i32 addrspace(1)* %out, align 4
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i32:
; GCN: v_mul_lo_i32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @v_mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) {
  %b_ptr = getelementptr i32, i32 addrspace(1)* %in, i32 1
  %a = load i32, i32 addrspace(1)* %in
  %b = load i32, i32 addrspace(1)* %b_ptr
  %result = mul i32 %a, %b
  store i32 %result, i32 addrspace(1)* %out
  ret void
}

; A standard 64-bit multiply. The expansion should be around 6 instructions.
; It would be difficult to match the expansion correctly without writing
; a really complicated list of FileCheck expressions. I don't want
; to confuse people who may 'break' this test with a correct optimization,
; so this test just uses FUNC-LABEL to make sure the compiler does not
; crash with a 'failed to select' error.

; FUNC-LABEL: {{^}}s_mul_i64:
define amdgpu_kernel void @s_mul_i64(i64 addrspace(1)* %out, i64 %a, i64 %b) nounwind {
  %mul = mul i64 %a, %b
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i64:
; GCN: v_mul_lo_i32
define amdgpu_kernel void @v_mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %b = load i64, i64 addrspace(1)* %bptr, align 8
  %mul = mul i64 %a, %b
  store i64 %mul, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}mul32_in_branch:
; GCN: s_mul_i32
define amdgpu_kernel void @mul32_in_branch(i32 addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b, i32 %c) {
entry:
  %0 = icmp eq i32 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i32, i32 addrspace(1)* %in
  br label %endif

else:
  %2 = mul i32 %a, %b
  br label %endif

endif:
  %3 = phi i32 [%1, %if], [%2, %else]
  store i32 %3, i32 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}mul64_in_branch:
; GCN-DAG: s_mul_i32
; GCN-DAG: v_mul_hi_u32
; GCN: s_endpgm
define amdgpu_kernel void @mul64_in_branch(i64 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %a, i64 %b, i64 %c) {
entry:
  %0 = icmp eq i64 %a, 0
  br i1 %0, label %if, label %else

if:
  %1 = load i64, i64 addrspace(1)* %in
  br label %endif

else:
  %2 = mul i64 %a, %b
  br label %endif

endif:
  %3 = phi i64 [%1, %if], [%2, %else]
  store i64 %3, i64 addrspace(1)* %out
  ret void
}

; FIXME: Load dwordx4
; FUNC-LABEL: {{^}}s_mul_i128:
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2
; GCN: s_load_dwordx2

; SI: v_mul_hi_u32
; SI: v_mul_hi_u32
; SI: s_mul_i32
; SI: v_mul_hi_u32
; SI: s_mul_i32
; SI: s_mul_i32

; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: s_mul_i32
; SI-DAG: s_mul_i32
; SI-DAG: v_mul_hi_u32

; VI: s_mul_i32
; VI: v_mad_u64_u32
; VI: s_mul_i32
; VI: v_mul_hi_u32
; VI: v_mad_u64_u32
; VI: v_mul_hi_u32
; VI: v_mad_u64_u32



; GCN: buffer_store_dwordx4
define amdgpu_kernel void @s_mul_i128(i128 addrspace(1)* %out, i128 %a, i128 %b) nounwind #0 {
  %mul = mul i128 %a, %b
  store i128 %mul, i128 addrspace(1)* %out
  ret void
}

; FUNC-LABEL: {{^}}v_mul_i128:
; GCN: {{buffer|flat}}_load_dwordx4
; GCN: {{buffer|flat}}_load_dwordx4

; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_add_i32_e32

; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_hi_u32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_lo_i32
; SI-DAG: v_mul_lo_i32

; VI-DAG: v_mul_lo_i32
; VI-DAG: v_mul_hi_u32
; VI: v_mad_u64_u32
; VI: v_mad_u64_u32
; VI: v_mad_u64_u32

; GCN: {{buffer|flat}}_store_dwordx4
define amdgpu_kernel void @v_mul_i128(i128 addrspace(1)* %out, i128 addrspace(1)* %aptr, i128 addrspace(1)* %bptr) #0 {
  %tid = call i32 @llvm.r600.read.tidig.x()
  %gep.a = getelementptr inbounds i128, i128 addrspace(1)* %aptr, i32 %tid
  %gep.b = getelementptr inbounds i128, i128 addrspace(1)* %bptr, i32 %tid
  %gep.out = getelementptr inbounds i128, i128 addrspace(1)* %bptr, i32 %tid
  %a = load i128, i128 addrspace(1)* %gep.a
  %b = load i128, i128 addrspace(1)* %gep.b
  %mul = mul i128 %a, %b
  store i128 %mul, i128 addrspace(1)* %gep.out
  ret void
}

declare i32 @llvm.r600.read.tidig.x() #1

attributes #0 = { nounwind }
attributes #1 = { nounwind readnone}