; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (shl 0, x) -> 0
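; Shifting an all-zeros vector left by any amount still yields zero; the SSE
; run folds this to a single xorps, while the AVX2 run below still performs
; the variable shift on the zeroed register.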
define <4 x i32> @combine_vec_shl_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_zero:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zero:
; AVX:       # BB#0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpsllvd %xmm0, %xmm1, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (shl x, c >= size(x)) -> undef
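; Shifting a 32-bit lane by 33 or more is undef, so lowering to a bare return
; of the unmodified input is legal.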
define <4 x i32> @combine_vec_shl_outofrange0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_outofrange0:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_outofrange0:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_shl_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_outofrange1:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_outofrange1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

; fold (shl x, 0) -> x
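; A shift amount of zero is the identity, so this also lowers to a bare return.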
define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_by_zero:
; SSE:       # BB#0:
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_by_zero:
; AVX:       # BB#0:
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (shl x, c) is known to be zero, return 0
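; Below, x is masked with 0xFFFF0000 before being shifted left by 16, so every
; remaining set bit is shifted out and the result is provably zero (neither
; run folds this to a constant yet).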
define <4 x i32> @combine_vec_shl_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_known_zero0:
; SSE:       # BB#0:
; SSE-NEXT:    pxor %xmm1, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
; SSE-NEXT:    pslld $16, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_known_zero0:
; AVX:       # BB#0:
; AVX-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vpblendw {{.*#+}} xmm0 = xmm1[0],xmm0[1],xmm1[2],xmm0[3],xmm1[4],xmm0[5],xmm1[6],xmm0[7]
; AVX-NEXT:    vpslld $16, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
  %2 = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_known_zero1:
; SSE:       # BB#0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_known_zero1:
; AVX:       # BB#0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4294901760, i32 8589803520, i32 17179607040, i32 34359214080>
  %2 = shl <4 x i32> %1, <i32 16, i32 15, i32 14, i32 13>
  ret <4 x i32> %2
}

; fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
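; Narrowing the AND across the truncate keeps the shift-amount computation in
; <4 x i32>; the masks 15/255/4095/65535 are unchanged by the truncation.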
define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_shl_trunc_and:
; SSE:       # BB#0:
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE-NEXT:    pslld $23, %xmm1
; SSE-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE-NEXT:    pmulld %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_trunc_and:
; AVX:       # BB#0:
; AVX-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = shl <4 x i32> %x, %2
  ret <4 x i32> %3
}

; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
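; e.g. (x << 2) << 4 == x << 6, hence the single pslld $6 below.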
define <4 x i32> @combine_vec_shl_shl0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl0:
; SSE:       # BB#0:
; SSE-NEXT:    pslld $6, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl0:
; AVX:       # BB#0:
; AVX-NEXT:    vpslld $6, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = shl <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl1:
; SSE:       # BB#0:
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = shl <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (shl (shl x, c1), c2) -> 0 if c1 + c2 >= size(x)
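; The combined shift amounts below (16+20, and 17+25 through 20+28) all reach
; or exceed the 32-bit lane width, so every bit is shifted out.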
define <4 x i32> @combine_vec_shl_shl_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero0:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl_zero0:
; AVX:       # BB#0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = shl <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_shl_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero1:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl_zero1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = shl <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
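; The inner i16 shift and the outer i32 shift combine into one shift of the
; sign-extended value, e.g. 4 + 16 = 20: pmovsxwd + pslld $20.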
define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_ext_shl0:
; SSE:       # BB#0:
; SSE-NEXT:    pmovsxwd %xmm0, %xmm2
; SSE-NEXT:    pslld $20, %xmm2
; SSE-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE-NEXT:    pmovsxwd %xmm0, %xmm1
; SSE-NEXT:    pslld $20, %xmm1
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ext_shl0:
; AVX:       # BB#0:
; AVX-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX-NEXT:    vpslld $20, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = shl <8 x i16> %x, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  ret <8 x i32> %3
}

define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_ext_shl1:
; SSE:       # BB#0:
; SSE-NEXT:    pmullw {{.*}}(%rip), %xmm0
; SSE-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE-NEXT:    pmovsxwd %xmm1, %xmm1
; SSE-NEXT:    pmovsxwd %xmm0, %xmm0
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ext_shl1:
; AVX:       # BB#0:
; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = shl <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 31, i32 31, i32 30, i32 30, i32 29, i32 29, i32 28, i32 28>
  ret <8 x i32> %3
}

; fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
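; With matching shift amounts, lshr by 4 followed by shl by 4 merely clears
; the low four bits, so this becomes a mask (pand) plus a zero-extend.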
define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_zext_lshr0:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr0:
; AVX:       # BB#0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %x, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  ret <8 x i32> %3
}

define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; SSE-LABEL: combine_vec_shl_zext_lshr1:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrlw $8, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3,4,5,6],xmm1[7]
; SSE-NEXT:    movdqa %xmm1, %xmm0
; SSE-NEXT:    psrlw $4, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3,4,5,6],xmm1[7]
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrlw $2, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0],xmm2[1,2],xmm0[3,4],xmm2[5,6],xmm0[7]
; SSE-NEXT:    movdqa %xmm2, %xmm1
; SSE-NEXT:    psrlw $1, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3],xmm1[4],xmm2[5],xmm1[6],xmm2[7]
; SSE-NEXT:    pxor %xmm2, %xmm2
; SSE-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr1:
; AVX:       # BB#0:
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    vmovdqa {{.*#+}} ymm1 = [1,2,3,4,5,6,7,8]
; AVX-NEXT:    vpsrlvd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    vpshufb {{.*#+}} ymm0 = ymm0[0,1,4,5,8,9,12,13],zero,zero,zero,zero,zero,zero,zero,zero,ymm0[16,17,20,21,24,25,28,29],zero,zero,zero,zero,zero,zero,zero,zero
; AVX-NEXT:    vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    vpsllvd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  ret <8 x i32> %3
}

; fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
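; The exact flag guarantees the right shift discards no set bits, so e.g.
; (x ashr exact 3) shl 5 == x shl 2 (pslld $2 below).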
define <4 x i32> @combine_vec_shl_ge_ashr_exact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ge_ashr_exact0:
; SSE:       # BB#0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_exact0:
; AVX:       # BB#0:
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_ge_ashr_exact1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ge_ashr_exact1:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $8, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $4, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $5, %xmm1
; SSE-NEXT:    psrad $3, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_exact1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 3, i32 4, i32 5, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C1-C2)) if C1 > C2
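; e.g. (x ashr exact 5) shl 3 == x ashr 2, giving the psrad $2 below.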
define <4 x i32> @combine_vec_shl_lt_ashr_exact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_lt_ashr_exact0:
; SSE:       # BB#0:
; SSE-NEXT:    psrad $2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_exact0:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrad $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_lt_ashr_exact1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_lt_ashr_exact1:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $8, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $6, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $7, %xmm1
; SSE-NEXT:    psrad $5, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_exact1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 3, i32 4, i32 5, i32 8>
  ret <4 x i32> %2
}

; fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) if C2 > C1
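; Without the exact flag the vacated low bits must be re-masked, e.g.
; (x lshr 3) shl 5 == (x shl 2) & 0xFFFFFFE0: pslld $2 followed by pand.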
define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_gt_lshr0:
; SSE:       # BB#0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr0:
; AVX:       # BB#0:
; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_gt_lshr1:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $8, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld $4, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $5, %xmm1
; SSE-NEXT:    psrld $3, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 4, i32 5, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) if C1 >= C2
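; e.g. (x lshr 5) shl 3 == (x lshr 2) & 0xFFFFFFF8: psrld $2 followed by pand.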
define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_le_lshr0:
; SSE:       # BB#0:
; SSE-NEXT:    psrld $2, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_le_lshr0:
; AVX:       # BB#0:
; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT:    vpsrld $2, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_le_lshr1:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $8, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrld $6, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrld $7, %xmm1
; SSE-NEXT:    psrld $5, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_le_lshr1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 3, i32 4, i32 5, i32 8>
  ret <4 x i32> %2
}

; fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
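; Arithmetic-shifting right then left by the same amount only clears the low
; c1 bits, so x is ANDed with -1 << 5 == 0xFFFFFFE0.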
define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr0:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ashr0:
; AVX:       # BB#0:
; AVX-NEXT:    vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr1:
; SSE:       # BB#0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ashr1:
; AVX:       # BB#0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
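; e.g. (x + 5) << 2 == (x << 2) + 20: the constant is pre-shifted and added
; after the shift (pslld $2 + paddd).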
define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_add0:
; SSE:       # BB#0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_add0:
; AVX:       # BB#0:
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_add1:
; SSE:       # BB#0:
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_add1:
; AVX:       # BB#0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}

; fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
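; e.g. (x * 5) << 2 == x * 20, folding both operations into a single pmulld.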
define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_mul0:
; SSE:       # BB#0:
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_mul0:
; AVX:       # BB#0:
; AVX-NEXT:    vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_mul1:
; SSE:       # BB#0:
; SSE-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_mul1:
; AVX:       # BB#0:
; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}