; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64

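; Element 0 is masked to at most 15, so its sign bit is known zero and the
; sext from i16 needs no movswl: vpextrw already zero-fills the upper bits of %eax.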
define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_sext:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpextrw $0, %xmm0, %eax
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_extract_sext:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpextrw $0, %xmm0, %eax
; X64-NEXT:    retq
  %1 = and <8 x i16> %a0, <i16 15, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %2 = extractelement <8 x i16> %1, i32 0
  %3 = sext i16 %2 to i32
  ret i32 %3
}

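; Masking element 0 to its low 16 bits proves the extracted i64 is non-negative,
; so the uitofp is lowered as a plain signed conversion (fild on X32,
; vcvtsi2ssq on X64) with no unsigned fix-up path.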
define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_uitofp:
; X32:       # BB#0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    andl $-8, %esp
; X32-NEXT:    subl $16, %esp
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
; X32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT:    fildll {{[0-9]+}}(%esp)
; X32-NEXT:    fstps {{[0-9]+}}(%esp)
; X32-NEXT:    flds {{[0-9]+}}(%esp)
; X32-NEXT:    movl %ebp, %esp
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_extract_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
; X64-NEXT:    vmovq %xmm0, %rax
; X64-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
; X64-NEXT:    retq
  %1 = and <2 x i64> %a0, <i64 65535, i64 -1>
  %2 = extractelement <2 x i64> %1, i32 0
  %3 = uitofp i64 %2 to float
  ret float %3
}

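; Lanes 4-7 are masked to at most 15, making their sign bits known zero, so the
; sext is lowered as a zero-extend (vpunpckhwd with a zero vector) rather than
; a sign-extend.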
define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_sext:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_shuffle_sext:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT:    retq
  %1 = and <8 x i16> %a0, <i16 -1, i16 -1, i16 -1, i16 -1, i16 15, i16 15, i16 15, i16 15>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %3 = sext <4 x i16> %2 to <4 x i32>
  ret <4 x i32> %3
}

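; Lanes 2 and 3 are masked to at most 255 and 4085, so the shuffled elements
; are known non-negative and the uitofp becomes a signed vcvtdq2ps.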
define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %3 = uitofp <4 x i32> %2 to <4 x float>
  ret <4 x float> %3
}

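; Known bits propagate through the or: lanes 2 and 3 still fit in 16 bits
; afterwards, so the sign bit stays clear and the signed vcvtdq2ps remains valid.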
define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_or_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpor {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_or_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = or <4 x i32> %1, <i32 65535, i32 65535, i32 65535, i32 65535>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %4 = uitofp <4 x i32> %3 to <4 x float>
  ret <4 x float> %4
}

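; Likewise through the xor: lanes 2 and 3 stay below 2^16 after the xor with
; 65535, so the signed vcvtdq2ps is still safe.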
define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_xor_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpxor {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_xor_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = xor <4 x i32> %1, <i32 65535, i32 65535, i32 65535, i32 65535>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %4 = uitofp <4 x i32> %3 to <4 x float>
  ret <4 x float> %4
}

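; Lanes 0 and 3 are masked to multiples of 65536, so the shl by 17 pushes every
; remaining set bit past bit 31; the shuffle reads only those lanes, making the
; lshr result known zero (hence the single vxorps).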
define <4 x i32> @knownbits_mask_shl_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shl_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_shl_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -65536, i32 -7, i32 -7, i32 -65536>
  %2 = shl <4 x i32> %1, <i32 17, i32 17, i32 17, i32 17>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = lshr <4 x i32> %3, <i32 15, i32 15, i32 15, i32 15>
  ret <4 x i32> %4
}

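; Lanes 0 and 3 are masked to 17 bits, so the ashr by 15 leaves values of at
; most 2 bits; the lshr by 30 of those lanes is therefore known zero.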
define <4 x i32> @knownbits_mask_ashr_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_ashr_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_ashr_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 131071, i32 -1, i32 -1, i32 131071>
  %2 = ashr <4 x i32> %1, <i32 15, i32 15, i32 15, i32 15>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = lshr <4 x i32> %3, <i32 30, i32 30, i32 30, i32 30>
  ret <4 x i32> %4
}

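; Lanes 0 and 3 of the masked operand are multiples of 65536, so each product
; has at least 16 trailing zeros; the shl by 22 only keeps source bits 0-9, so
; the result is known zero.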
define <4 x i32> @knownbits_mask_mul_shuffle_shl(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_mul_shuffle_shl:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_mul_shuffle_shl:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -65536, i32 -7, i32 -7, i32 -65536>
  %2 = mul <4 x i32> %a1, %1
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = shl <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
  ret <4 x i32> %4
}

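; The trunc to i32 keeps lanes 0 and 3 as multiples of 65536, so source bits
; 0-9 are zero and the shl by 22 again yields a known-zero result.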
define <4 x i32> @knownbits_mask_trunc_shuffle_shl(<4 x i64> %a0) nounwind {
; X32-LABEL: knownbits_mask_trunc_shuffle_shl:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_trunc_shuffle_shl:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i64> %a0, <i64 -65536, i64 -7, i64 7, i64 -65536>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = shl <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
  ret <4 x i32> %4
}

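; Lanes 0 and 3 of both addends are at most 32767, so their sum fits in 16 bits
; and the lshr by 17 is known zero.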
define <4 x i32> @knownbits_mask_add_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_add_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_add_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
  %2 = and <4 x i32> %a1, <i32 32767, i32 -1, i32 -1, i32 32767>
  %3 = add <4 x i32> %1, %2
  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %5 = lshr <4 x i32> %4, <i32 17, i32 17, i32 17, i32 17>
  ret <4 x i32> %5
}

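; Lanes 0 and 3 are masked to at most 15, so 255 - x fits in 8 bits and the
; lshr by 22 is known zero.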
define <4 x i32> @knownbits_mask_sub_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_sub_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_sub_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 15, i32 -1, i32 -1, i32 15>
  %2 = sub <4 x i32> <i32 255, i32 255, i32 255, i32 255>, %1
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
  ret <4 x i32> %4
}

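; An unsigned division can only shrink the dividend: lanes 0 and 3 stay at most
; 32767, so the lshr by 22 is known zero.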
define <4 x i32> @knownbits_mask_udiv_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_udiv_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_udiv_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
  %2 = udiv <4 x i32> %1, %a1
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
  ret <4 x i32> %4
}

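; The remainder of a urem by 16 is below 16, so the lshr by 22 is known zero
; with no masking needed.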
define <4 x i32> @knownbits_urem_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_urem_lshr:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_urem_lshr:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = urem <4 x i32> %a0, <i32 16, i32 16, i32 16, i32 16>
  %2 = lshr <4 x i32> %1, <i32 22, i32 22, i32 22, i32 22>
  ret <4 x i32> %2
}

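; Here the known-bits fold does not fire through the variable vector urem: the
; division is scalarized with divl and the final vpsrld $22 remains in the output.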
define <4 x i32> @knownbits_mask_urem_shuffle_lshr(<4 x i32> %a0, <4 x i32> %a1) nounwind {
; X32-LABEL: knownbits_mask_urem_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    pushl %esi
; X32-NEXT:    vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
; X32-NEXT:    vpand %xmm2, %xmm0, %xmm0
; X32-NEXT:    vpand %xmm2, %xmm1, %xmm1
; X32-NEXT:    vpextrd $1, %xmm0, %eax
; X32-NEXT:    vpextrd $1, %xmm1, %ecx
; X32-NEXT:    xorl %edx, %edx
; X32-NEXT:    divl %ecx
; X32-NEXT:    movl %edx, %ecx
; X32-NEXT:    vmovd %xmm0, %eax
; X32-NEXT:    vmovd %xmm1, %esi
; X32-NEXT:    xorl %edx, %edx
; X32-NEXT:    divl %esi
; X32-NEXT:    vmovd %edx, %xmm2
; X32-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
; X32-NEXT:    vpextrd $2, %xmm0, %eax
; X32-NEXT:    vpextrd $2, %xmm1, %ecx
; X32-NEXT:    xorl %edx, %edx
; X32-NEXT:    divl %ecx
; X32-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
; X32-NEXT:    vpextrd $3, %xmm0, %eax
; X32-NEXT:    vpextrd $3, %xmm1, %ecx
; X32-NEXT:    xorl %edx, %edx
; X32-NEXT:    divl %ecx
; X32-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT:    vpsrld $22, %xmm0, %xmm0
; X32-NEXT:    popl %esi
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_urem_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vmovdqa {{.*#+}} xmm2 = [32767,4294967295,4294967295,32767]
; X64-NEXT:    vpand %xmm2, %xmm0, %xmm0
; X64-NEXT:    vpand %xmm2, %xmm1, %xmm1
; X64-NEXT:    vpextrd $1, %xmm0, %eax
; X64-NEXT:    vpextrd $1, %xmm1, %ecx
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    movl %edx, %ecx
; X64-NEXT:    vmovd %xmm0, %eax
; X64-NEXT:    vmovd %xmm1, %esi
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %esi
; X64-NEXT:    vmovd %edx, %xmm2
; X64-NEXT:    vpinsrd $1, %ecx, %xmm2, %xmm2
; X64-NEXT:    vpextrd $2, %xmm0, %eax
; X64-NEXT:    vpextrd $2, %xmm1, %ecx
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    vpinsrd $2, %edx, %xmm2, %xmm2
; X64-NEXT:    vpextrd $3, %xmm0, %eax
; X64-NEXT:    vpextrd $3, %xmm1, %ecx
; X64-NEXT:    xorl %edx, %edx
; X64-NEXT:    divl %ecx
; X64-NEXT:    vpinsrd $3, %edx, %xmm2, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X64-NEXT:    vpsrld $22, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 32767, i32 -1, i32 -1, i32 32767>
  %2 = and <4 x i32> %a1, <i32 32767, i32 -1, i32 -1, i32 32767>
  %3 = urem <4 x i32> %1, %2
  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %5 = lshr <4 x i32> %4, <i32 22, i32 22, i32 22, i32 22>
  ret <4 x i32> %5
}

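; The srem by 16 is expanded inline (sign-bit bias via vpsrad/vpsrld, then
; vpand/vpsubd); the final vpsrld $22 is likewise not folded away here.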
define <4 x i32> @knownbits_mask_srem_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_srem_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpsrad $31, %xmm0, %xmm1
; X32-NEXT:    vpsrld $28, %xmm1, %xmm1
; X32-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm1, %xmm1
; X32-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X32-NEXT:    vpsrld $22, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_srem_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpsrad $31, %xmm0, %xmm1
; X64-NEXT:    vpsrld $28, %xmm1, %xmm1
; X64-NEXT:    vpaddd %xmm1, %xmm0, %xmm1
; X64-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; X64-NEXT:    vpsubd %xmm1, %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[0,0,3,3]
; X64-NEXT:    vpsrld $22, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -32768, i32 -1, i32 -1, i32 -32768>
  %2 = srem <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = lshr <4 x i32> %3, <i32 22, i32 22, i32 22, i32 22>
  ret <4 x i32> %4
}