; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64

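; Masking element 0 with 15 clears its upper bits, so the extracted i16 is
; known non-negative and the sext folds away to a plain vpextrw.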
define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_sext:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpextrw $0, %xmm0, %eax
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_extract_sext:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpextrw $0, %xmm0, %eax
; X64-NEXT:    retq
  %1 = and <8 x i16> %a0, <i16 15, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %2 = extractelement <8 x i16> %1, i32 0
  %3 = sext i16 %2 to i32
  ret i32 %3
}

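; Masking element 0 down to 16 bits proves the extracted i64 is non-negative,
; so the uitofp can use the signed vcvtsi2ssq lowering on x86-64 without an
; unsigned fixup sequence.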
define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_uitofp:
; X32:       # BB#0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    andl $-8, %esp
; X32-NEXT:    subl $16, %esp
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
; X32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT:    fildll {{[0-9]+}}(%esp)
; X32-NEXT:    fstps {{[0-9]+}}(%esp)
; X32-NEXT:    flds {{[0-9]+}}(%esp)
; X32-NEXT:    movl %ebp, %esp
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_extract_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
; X64-NEXT:    vmovq %xmm0, %rax
; X64-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
; X64-NEXT:    retq
  %1 = and <2 x i64> %a0, <i64 65535, i64 -1>
  %2 = extractelement <2 x i64> %1, i32 0
  %3 = uitofp i64 %2 to float
  ret float %3
}

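; The high four elements are masked with 15, so their sign bits are known
; zero and the sext lowers to a zero-extending vpunpckhwd against zero.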
define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_sext:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_shuffle_sext:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT:    retq
  %1 = and <8 x i16> %a0, <i16 -1, i16 -1, i16 -1, i16 -1, i16 15, i16 15, i16 15, i16 15>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %3 = sext <4 x i16> %2 to <4 x i32>
  ret <4 x i32> %3
}

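; Elements 2 and 3 are masked to small non-negative values, so the shuffled
; uitofp lowers to the signed vcvtdq2ps with no unsigned fixup.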
define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %3 = uitofp <4 x i32> %2 to <4 x float>
  ret <4 x float> %3
}

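; The or only sets bits within the low 16, so the upper bits of elements 2
; and 3 stay known zero through to the uitofp and vcvtdq2ps remains safe.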
define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_or_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpor {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_or_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = or <4 x i32> %1, <i32 65535, i32 65535, i32 65535, i32 65535>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %4 = uitofp <4 x i32> %3 to <4 x float>
  ret <4 x float> %4
}

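; Likewise for xor: flipping only the low 16 bits leaves the upper bits of
; elements 2 and 3 known zero, so the signed vcvtdq2ps lowering still holds.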
define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_xor_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpxor {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_xor_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = xor <4 x i32> %1, <i32 65535, i32 65535, i32 65535, i32 65535>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %4 = uitofp <4 x i32> %3 to <4 x float>
  ret <4 x float> %4
}

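; The mask keeps only bits 16-31 in lanes 0 and 3, and the shl by 17 shifts
; those out of the 32-bit lanes entirely; the shuffle reads just lanes 0 and
; 3, so the whole sequence folds to a zero vector.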
define <4 x i32> @knownbits_mask_shl_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shl_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_shl_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -65536, i32 -7, i32 -7, i32 -65536>
  %2 = shl <4 x i32> %1, <i32 17, i32 17, i32 17, i32 17>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = lshr <4 x i32> %3, <i32 15, i32 15, i32 15, i32 15>
  ret <4 x i32> %4
}

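; After masking lanes 0 and 3 to 17 bits, the ashr by 15 leaves at most the
; low two bits set, and the final lshr by 30 discards them; the shuffled
; result is therefore known zero.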
define <4 x i32> @knownbits_mask_ashr_shuffle_lshr(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_ashr_shuffle_lshr:
; X32:       # BB#0:
; X32-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_ashr_shuffle_lshr:
; X64:       # BB#0:
; X64-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 131071, i32 -1, i32 -1, i32 131071>
  %2 = ashr <4 x i32> %1, <i32 15, i32 15, i32 15, i32 15>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 0, i32 0, i32 3, i32 3>
  %4 = lshr <4 x i32> %3, <i32 30, i32 30, i32 30, i32 30>
  ret <4 x i32> %4
}