blob: ff666cc3653b29ac33bd62ee4552f2091b4c0144 [file] [log] [blame]
; RUN: llc -march=amdgcn -mcpu=verde -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
; XUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=VI -check-prefix=FUNC %s
; RUN: llc -march=r600 -mcpu=redwood < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s

; Workitem/workgroup id intrinsics used by the indexed tests below.
declare i32 @llvm.r600.read.tidig.x() #0

declare i32 @llvm.r600.read.tgid.x() #0

;EG: {{^}}shl_v2i32:
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

;SI: {{^}}shl_v2i32:
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

;VI: {{^}}shl_v2i32:
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @shl_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i32>, <2 x i32> addrspace(1)* %in, i32 1
  %a = load <2 x i32>, <2 x i32> addrspace(1)* %in
  %b = load <2 x i32>, <2 x i32> addrspace(1)* %b_ptr
  %result = shl <2 x i32> %a, %b
  store <2 x i32> %result, <2 x i32> addrspace(1)* %out
  ret void
}

;EG: {{^}}shl_v4i32:
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}
;EG: LSHL {{\*? *}}T{{[0-9]+\.[XYZW], T[0-9]+\.[XYZW], T[0-9]+\.[XYZW]}}

;SI: {{^}}shl_v4i32:
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;SI: v_lshl_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

;VI: {{^}}shl_v4i32:
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}
;VI: v_lshlrev_b32_e32 v{{[0-9]+, v[0-9]+, v[0-9]+}}

define amdgpu_kernel void @shl_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i32>, <4 x i32> addrspace(1)* %in, i32 1
  %a = load <4 x i32>, <4 x i32> addrspace(1)* %in
  %b = load <4 x i32>, <4 x i32> addrspace(1)* %b_ptr
  %result = shl <4 x i32> %a, %b
  store <4 x i32> %result, <4 x i32> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16:
; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}

; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @shl_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
  %b_ptr = getelementptr i16, i16 addrspace(1)* %in, i16 1
  %a = load i16, i16 addrspace(1)* %in
  %b = load i16, i16 addrspace(1)* %b_ptr
  %result = shl i16 %a, %b
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16_v_s:
; VI: v_lshlrev_b32_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}

; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @shl_i16_v_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
  %a = load i16, i16 addrspace(1)* %in
  %result = shl i16 %a, %b
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16_v_compute_s:
; SI: v_lshlrev_b32_e32 v{{[0-9]+}}, s{{[0-9]+}}, v{{[0-9]+}}

; VI: v_lshlrev_b16_e64 v{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
define amdgpu_kernel void @shl_i16_v_compute_s(i16 addrspace(1)* %out, i16 addrspace(1)* %in, i16 %b) {
  %a = load i16, i16 addrspace(1)* %in
  %b.add = add i16 %b, 3
  %result = shl i16 %a, %b.add
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16_computed_amount:
; VI: v_add_u16_e32 [[ADD:v[0-9]+]], 3, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, [[ADD]], v{{[0-9]+}}
define amdgpu_kernel void @shl_i16_computed_amount(i16 addrspace(1)* %out, i16 addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr inbounds i16, i16 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr inbounds i16, i16 addrspace(1)* %out, i32 %tid
  %b_ptr = getelementptr i16, i16 addrspace(1)* %gep, i16 1
  %a = load volatile i16, i16 addrspace(1)* %in
  %b = load volatile i16, i16 addrspace(1)* %b_ptr
  %b.add = add i16 %b, 3
  %result = shl i16 %a, %b.add
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_i16_i_s:
; GCN: s_lshl_b32 s{{[0-9]+}}, s{{[0-9]+}}, 12
define amdgpu_kernel void @shl_i16_i_s(i16 addrspace(1)* %out, i16 zeroext %a) {
  %result = shl i16 %a, 12
  store i16 %result, i16 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_v2i16:
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @shl_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr inbounds <2 x i16>, <2 x i16> addrspace(1)* %out, i32 %tid
  %b_ptr = getelementptr <2 x i16>, <2 x i16> addrspace(1)* %gep, i16 1
  %a = load <2 x i16>, <2 x i16> addrspace(1)* %in
  %b = load <2 x i16>, <2 x i16> addrspace(1)* %b_ptr
  %result = shl <2 x i16> %a, %b
  store <2 x i16> %result, <2 x i16> addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}shl_v4i16:
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
; VI: v_lshlrev_b16_e32 v{{[0-9]+}}, v{{[0-9]+}}, v{{[0-9]+}}
define amdgpu_kernel void @shl_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tidig.x() #0
  %gep = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr inbounds <4 x i16>, <4 x i16> addrspace(1)* %out, i32 %tid
  %b_ptr = getelementptr <4 x i16>, <4 x i16> addrspace(1)* %gep, i16 1
  %a = load <4 x i16>, <4 x i16> addrspace(1)* %gep
  %b = load <4 x i16>, <4 x i16> addrspace(1)* %b_ptr
  %result = shl <4 x i16> %a, %b
  store <4 x i16> %result, <4 x i16> addrspace(1)* %gep.out
  ret void
}

;EG-LABEL: {{^}}shl_i64:
;EG: SUB_INT {{\*? *}}[[COMPSH:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHIFT:T[0-9]+\.[XYZW]]]
;EG: LSHR {{\* *}}[[TEMP:T[0-9]+\.[XYZW]]], [[OPLO:T[0-9]+\.[XYZW]]], {{[[COMPSH]]|PV.[XYZW]}}
;EG-DAG: ADD_INT {{\*? *}}[[BIGSH:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
;EG-DAG: LSHR {{\*? *}}[[OVERF:T[0-9]+\.[XYZW]]], {{[[TEMP]]|PV.[XYZW]}}, 1
;EG-DAG: LSHL {{\*? *}}[[HISMTMP:T[0-9]+\.[XYZW]]], [[OPHI:T[0-9]+\.[XYZW]]], [[SHIFT]]
;EG-DAG: OR_INT {{\*? *}}[[HISM:T[0-9]+\.[XYZW]]], {{[[HISMTMP]]|PV.[XYZW]|PS}}, {{[[OVERF]]|PV.[XYZW]}}
;EG-DAG: LSHL {{\*? *}}[[LOSM:T[0-9]+\.[XYZW]]], [[OPLO]], {{PS|[[SHIFT]]|PV.[XYZW]}}
;EG-DAG: SETGT_UINT {{\*? *}}[[RESC:T[0-9]+\.[XYZW]]], [[SHIFT]], literal
;EG-DAG: CNDE_INT {{\*? *}}[[RESLO:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW]}}
;EG-DAG: CNDE_INT {{\*? *}}[[RESHI:T[0-9]+\.[XYZW]]], {{T[0-9]+\.[XYZW], .*}}, 0.0

; GCN-LABEL: {{^}}shl_i64:
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
; VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
define amdgpu_kernel void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %b_ptr = getelementptr i64, i64 addrspace(1)* %in, i64 1
  %a = load i64, i64 addrspace(1)* %in
  %b = load i64, i64 addrspace(1)* %b_ptr
  %result = shl i64 %a, %b
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

;EG-LABEL: {{^}}shl_v2i64:
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHA]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHB]]
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: LSHL {{.*}}, [[SHA]]
;EG-DAG: LSHL {{.*}}, [[SHB]]
;EG-DAG: LSHL {{.*}}, [[SHA]]
;EG-DAG: LSHL {{.*}}, [[SHB]]
;EG-DAG: LSHL
;EG-DAG: LSHL
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT
;EG-DAG: CNDE_INT

;SI: {{^}}shl_v2i64:
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}

;VI: {{^}}shl_v2i64:
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}

define amdgpu_kernel void @shl_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %in) {
  %b_ptr = getelementptr <2 x i64>, <2 x i64> addrspace(1)* %in, i64 1
  %a = load <2 x i64>, <2 x i64> addrspace(1)* %in
  %b = load <2 x i64>, <2 x i64> addrspace(1)* %b_ptr
  %result = shl <2 x i64> %a, %b
  store <2 x i64> %result, <2 x i64> addrspace(1)* %out
  ret void
}

;EG: {{^}}shl_v4i64:
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHA:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHA:T[0-9]+\.[XYZW]]]
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHB:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHB:T[0-9]+\.[XYZW]]]
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHC:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHC:T[0-9]+\.[XYZW]]]
;EG-DAG: SUB_INT {{\*? *}}[[COMPSHD:T[0-9]+\.[XYZW]]], {{literal.[xy]}}, [[SHD:T[0-9]+\.[XYZW]]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHA]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHB]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHC]]
;EG-DAG: LSHR {{\*? *}}[[COMPSHD]]
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: LSHR {{.*}}, 1
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHA:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHB:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHC:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: ADD_INT {{\*? *}}[[BIGSHD:T[0-9]+\.[XYZW]]]{{.*}}, literal
;EG-DAG: LSHL {{.*}}, [[SHA]]
;EG-DAG: LSHL {{.*}}, [[SHB]]
;EG-DAG: LSHL {{.*}}, [[SHC]]
;EG-DAG: LSHL {{.*}}, [[SHD]]
;EG-DAG: LSHL {{.*}}, [[SHA]]
;EG-DAG: LSHL {{.*}}, [[SHB]]
;EG-DAG: LSHL {{.*}}, [[SHC]]
;EG-DAG: LSHL {{.*}}, [[SHD]]
;EG-DAG: LSHL
;EG-DAG: LSHL
;EG-DAG: LSHL
;EG-DAG: LSHL
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHA]], literal
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHB]], literal
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHC]], literal
;EG-DAG: SETGT_UINT {{\*? *T[0-9]\.[XYZW]}}, [[SHD]], literal
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT {{.*}}, 0.0
;EG-DAG: CNDE_INT
;EG-DAG: CNDE_INT
;EG-DAG: CNDE_INT
;EG-DAG: CNDE_INT

;SI: {{^}}shl_v4i64:
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}
;SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], v[0-9]+}}

;VI: {{^}}shl_v4i64:
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}
;VI: v_lshlrev_b64 {{v\[[0-9]+:[0-9]+\], v[0-9]+, v\[[0-9]+:[0-9]+\]}}

define amdgpu_kernel void @shl_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %in) {
  %b_ptr = getelementptr <4 x i64>, <4 x i64> addrspace(1)* %in, i64 1
  %a = load <4 x i64>, <4 x i64> addrspace(1)* %in
  %b = load <4 x i64>, <4 x i64> addrspace(1)* %b_ptr
  %result = shl <4 x i64> %a, %b
  store <4 x i64> %result, <4 x i64> addrspace(1)* %out
  ret void
}

; Make sure load width gets reduced to i32 load.
; GCN-LABEL: {{^}}s_shl_32_i64:
; GCN-DAG: s_load_dword [[LO_A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb{{$}}
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 0{{$}}
; GCN-DAG: v_mov_b32_e32 v[[VHI:[0-9]+]], [[LO_A]]
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[VHI]]{{\]}}
define amdgpu_kernel void @s_shl_32_i64(i64 addrspace(1)* %out, i64 %a) {
  %result = shl i64 %a, 32
  store i64 %result, i64 addrspace(1)* %out
  ret void
}

; GCN-LABEL: {{^}}v_shl_32_i64:
; GCN-DAG: buffer_load_dword v[[LO_A:[0-9]+]],
; GCN-DAG: v_mov_b32_e32 v[[VLO:[0-9]+]], 0{{$}}
; GCN: buffer_store_dwordx2 v{{\[}}[[VLO]]:[[LO_A]]{{\]}}
define amdgpu_kernel void @v_shl_32_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %in) {
  %tid = call i32 @llvm.r600.read.tgid.x() #0
  %gep.in = getelementptr i64, i64 addrspace(1)* %in, i32 %tid
  %gep.out = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
  %a = load i64, i64 addrspace(1)* %gep.in
  %result = shl i64 %a, 32
  store i64 %result, i64 addrspace(1)* %gep.out
  ret void
}

; FUNC-LABEL: {{^}}s_shl_constant_i64
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_constant_i64(i64 addrspace(1)* %out, i64 %a) {
  %shl = shl i64 281474976710655, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_shl_constant_i64:
; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; SI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0xab19b207
; SI-DAG: s_movk_i32 s[[KHI:[0-9]+]], 0x11e{{$}}
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
; SI: buffer_store_dwordx2
define amdgpu_kernel void @v_shl_constant_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %shl = shl i64 1231231234567, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_shl_i64_32_bit_constant:
; SI-DAG: buffer_load_dword [[VAL:v[0-9]+]]
; SI-DAG: s_mov_b32 s[[KLO:[0-9]+]], 0x12d687{{$}}
; SI-DAG: s_mov_b32 s[[KHI:[0-9]+]], 0{{$}}
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, s{{\[}}[[KLO]]:[[KHI]]{{\]}}, [[VAL]]
define amdgpu_kernel void @v_shl_i64_32_bit_constant(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %shl = shl i64 1234567, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}v_shl_inline_imm_64_i64:
; SI: v_lshl_b64 {{v\[[0-9]+:[0-9]+\]}}, 64, {{v[0-9]+}}
define amdgpu_kernel void @v_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr) {
  %a = load i64, i64 addrspace(1)* %aptr, align 8
  %shl = shl i64 64, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_64_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 64, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_64_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 64, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_1_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_1_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 1, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_1.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 1.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4607182418800017408, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_1.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -1.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_neg_1.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13830554455654793216, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_0.5_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 0.5, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4602678819172646912, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_0.5_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -0.5, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_neg_0.5_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13826050856027422720, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_2.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 2.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4611686018427387904, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_2.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -2.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_neg_2.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13835058055282163712, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_4.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, 4.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4616189618054758400, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_imm_neg_4.0_i64:
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, -4.0, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13839561654909534208, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}


; Test with the 64-bit integer bitpattern for a 32-bit float in the
; low 32-bits, which is not a valid 64-bit inline immediate.

; FUNC-LABEL: {{^}}s_shl_inline_imm_f32_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 1082130432, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FIXME: Copy of -1 register
; FUNC-LABEL: {{^}}s_shl_inline_imm_f32_neg_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -1{{$}}
; SI-DAG: s_mov_b32 s[[K_HI_COPY:[0-9]+]], s[[K_HI]]
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI_COPY]]{{\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 -1065353216, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; Shift into upper 32-bits
; FUNC-LABEL: {{^}}s_shl_inline_high_imm_f32_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], 4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_high_imm_f32_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 4647714815446351872, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; FUNC-LABEL: {{^}}s_shl_inline_high_imm_f32_neg_4.0_i64:
; SI-DAG: s_mov_b32 s[[K_HI:[0-9]+]], -4.0
; SI-DAG: s_mov_b32 s[[K_LO:[0-9]+]], 0{{$}}
; SI: s_lshl_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[}}[[K_LO]]:[[K_HI]]{{\]}}, s{{[0-9]+}}
define amdgpu_kernel void @s_shl_inline_high_imm_f32_neg_4.0_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 %a) {
  %shl = shl i64 13871086852301127680, %a
  store i64 %shl, i64 addrspace(1)* %out, align 8
  ret void
}

; mul by 2 should be strength-reduced to a left shift by 1.
; FUNC-LABEL: {{^}}test_mul2:
; GCN: s_lshl_b32 s{{[0-9]}}, s{{[0-9]}}, 1
define amdgpu_kernel void @test_mul2(i32 %p) {
  %i = mul i32 %p, 2
  store volatile i32 %i, i32 addrspace(1)* undef
  ret void
}

attributes #0 = { nounwind readnone }