; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; fold (srl 0, x) -> 0
define <4 x i32> @combine_vec_lshr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_zero:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE-NEXT: pxor %xmm3, %xmm3
; SSE-NEXT: psrld %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_zero:
; AVX: # BB#0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsrlvd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (srl x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_lshr_outofrange0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_outofrange0:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_outofrange0:
; AVX: # BB#0:
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_lshr_outofrange1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_outofrange1:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_outofrange1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

; fold (srl x, 0) -> x
define <4 x i32> @combine_vec_lshr_by_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_by_zero:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_by_zero:
; AVX: # BB#0:
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (srl x, c) is known to be zero, return 0
define <4 x i32> @combine_vec_lshr_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero0:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_known_zero0:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_known_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_known_zero1:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $11, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $9, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $10, %xmm1
; SSE-NEXT: psrld $8, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_known_zero1:
; AVX: # BB#0:
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 15, i32 15, i32 15, i32 15>
  %2 = lshr <4 x i32> %1, <i32 8, i32 9, i32 10, i32 11>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> (srl x, (add c1, c2))
define <4 x i32> @combine_vec_lshr_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr0:
; SSE: # BB#0:
; SSE-NEXT: psrld $6, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr0:
; AVX: # BB#0:
; AVX-NEXT: vpsrld $6, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr1:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $2, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $3, %xmm0
; SSE-NEXT: psrld $1, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrld $7, %xmm0
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrld $5, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: psrld $6, %xmm0
; SSE-NEXT: psrld $4, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = lshr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (srl (srl x, c1), c2) -> 0
define <4 x i32> @combine_vec_lshr_lshr_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero0:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero0:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_lshr_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lshr_zero1:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $20, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $18, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $19, %xmm1
; SSE-NEXT: psrld $17, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $28, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $26, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $27, %xmm1
; SSE-NEXT: psrld $25, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lshr_zero1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = lshr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (srl (trunc (srl x, c1)), c2) -> (trunc (srl x, (add c1, c2)))
define <4 x i32> @combine_vec_lshr_trunc_lshr0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr0:
; SSE: # BB#0:
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: psrlq $32, %xmm1
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr0:
; AVX: # BB#0:
; AVX-NEXT: vpsrlq $32, %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr1:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $33, %xmm2
; SSE-NEXT: psrlq $32, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $35, %xmm2
; SSE-NEXT: psrlq $34, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $19, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $17, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $18, %xmm1
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 33, i64 34, i64 35>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 16, i32 17, i32 18, i32 19>
  ret <4 x i32> %3
}

; fold (srl (trunc (srl x, c1)), c2) -> 0
define <4 x i32> @combine_vec_lshr_trunc_lshr_zero0(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; SSE: # BB#0:
; SSE-NEXT: xorps %xmm0, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero0:
; AVX: # BB#0:
; AVX-NEXT: vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 48, i64 48, i64 48>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 24, i32 24, i32 24>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_trunc_lshr_zero1(<4 x i64> %x) {
; SSE-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrlq $49, %xmm2
; SSE-NEXT: psrlq $48, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $51, %xmm2
; SSE-NEXT: psrlq $50, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $27, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld $25, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $26, %xmm1
; SSE-NEXT: psrld $24, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_lshr_zero1:
; AVX: # BB#0:
; AVX-NEXT: vpsrlvq {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3]
; AVX-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = lshr <4 x i64> %x, <i64 48, i64 49, i64 50, i64 51>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %2, <i32 24, i32 25, i32 26, i32 27>
  ret <4 x i32> %3
}

; fold (srl (shl x, c), c) -> (and x, cst2)
define <4 x i32> @combine_vec_lshr_shl_mask0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask0:
; SSE: # BB#0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask0:
; AVX: # BB#0:
; AVX-NEXT: vbroadcastss {{.*}}(%rip), %xmm1
; AVX-NEXT: vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = lshr <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}

define <4 x i32> @combine_vec_lshr_shl_mask1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_shl_mask1:
; SSE: # BB#0:
; SSE-NEXT: andps {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_shl_mask1:
; AVX: # BB#0:
; AVX-NEXT: vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shl <4 x i32> %x, <i32 2, i32 3, i32 4, i32 5>
  %2 = lshr <4 x i32> %1, <i32 2, i32 3, i32 4, i32 5>
  ret <4 x i32> %2
}

; fold (srl (sra X, Y), 31) -> (srl X, 31)
define <4 x i32> @combine_vec_lshr_ashr_sign(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_lshr_ashr_sign:
; SSE: # BB#0:
; SSE-NEXT: psrld $31, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_ashr_sign:
; AVX: # BB#0:
; AVX-NEXT: vpsrld $31, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = ashr <4 x i32> %x, %y
  %2 = lshr <4 x i32> %1, <i32 31, i32 31, i32 31, i32 31>
  ret <4 x i32> %2
}

; fold (srl (ctlz x), "5") -> x iff x has one bit set (the low bit).
define <4 x i32> @combine_vec_lshr_lzcnt_bit0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit0:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: pxor {{.*}}(%rip), %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit0:
; AVX: # BB#0:
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrld $4, %xmm0, %xmm0
; AVX-NEXT: vpbroadcastd {{.*}}(%rip), %xmm1
; AVX-NEXT: vpxor %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}

define <4 x i32> @combine_vec_lshr_lzcnt_bit1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_lshr_lzcnt_bit1:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; SSE-NEXT: movdqa %xmm3, %xmm4
; SSE-NEXT: pshufb %xmm1, %xmm4
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrlw $4, %xmm1
; SSE-NEXT: pand %xmm2, %xmm1
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pshufb %xmm1, %xmm3
; SSE-NEXT: pcmpeqb %xmm2, %xmm1
; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: paddb %xmm3, %xmm1
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: pcmpeqb %xmm2, %xmm3
; SSE-NEXT: psrlw $8, %xmm3
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: psrlw $8, %xmm1
; SSE-NEXT: paddw %xmm3, %xmm1
; SSE-NEXT: pcmpeqw %xmm2, %xmm0
; SSE-NEXT: psrld $16, %xmm0
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: paddd %xmm0, %xmm1
; SSE-NEXT: psrld $5, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_lzcnt_bit1:
; AVX: # BB#0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vmovdqa {{.*#+}} xmm1 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
; AVX-NEXT: vpand %xmm1, %xmm0, %xmm2
; AVX-NEXT: vmovdqa {{.*#+}} xmm3 = [4,3,2,2,1,1,1,1,0,0,0,0,0,0,0,0]
; AVX-NEXT: vpshufb %xmm2, %xmm3, %xmm2
; AVX-NEXT: vpsrlw $4, %xmm0, %xmm4
; AVX-NEXT: vpand %xmm1, %xmm4, %xmm1
; AVX-NEXT: vpxor %xmm4, %xmm4, %xmm4
; AVX-NEXT: vpcmpeqb %xmm4, %xmm1, %xmm5
; AVX-NEXT: vpand %xmm5, %xmm2, %xmm2
; AVX-NEXT: vpshufb %xmm1, %xmm3, %xmm1
; AVX-NEXT: vpaddb %xmm1, %xmm2, %xmm1
; AVX-NEXT: vpcmpeqb %xmm4, %xmm0, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm2, %xmm2
; AVX-NEXT: vpand %xmm2, %xmm1, %xmm2
; AVX-NEXT: vpsrlw $8, %xmm1, %xmm1
; AVX-NEXT: vpaddw %xmm2, %xmm1, %xmm1
; AVX-NEXT: vpcmpeqw %xmm4, %xmm0, %xmm0
; AVX-NEXT: vpsrld $16, %xmm0, %xmm0
; AVX-NEXT: vpand %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsrld $16, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vpsrld $5, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 4, i32 32, i32 64, i32 128>
  %2 = call <4 x i32> @llvm.ctlz.v4i32(<4 x i32> %1, i1 0)
  %3 = lshr <4 x i32> %2, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %3
}
declare <4 x i32> @llvm.ctlz.v4i32(<4 x i32>, i1)

; fold (srl x, (trunc (and y, c))) -> (srl x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_lshr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_lshr_trunc_and:
; SSE: # BB#0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pand {{.*}}(%rip), %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: movdqa %xmm0, %xmm3
; SSE-NEXT: psrld %xmm2, %xmm3
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: psrlq $32, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm4
; SSE-NEXT: psrld %xmm2, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm4 = xmm4[0,1,2,3],xmm3[4,5,6,7]
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: psrld %xmm1, %xmm2
; SSE-NEXT: psrld %xmm3, %xmm0
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_lshr_trunc_and:
; AVX: # BB#0:
; AVX-NEXT: vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-NEXT: vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-NEXT: vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT: vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vzeroupper
; AVX-NEXT: retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = lshr <4 x i32> %x, %2
  ret <4 x i32> %3
}