; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64
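; These tests check that known-bits information about masked vector lanes lets
; the backend simplify the extract/shuffle + sext/uitofp patterns below.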
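; Element 0 is masked to its low 4 bits, so its sign bit is known zero and the
; sext should fold away: a single vpextrw with no explicit sign extension.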
define i32 @knownbits_mask_extract_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_sext:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpextrw $0, %xmm0, %eax
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_extract_sext:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpextrw $0, %xmm0, %eax
; X64-NEXT:    retq
  %1 = and <8 x i16> %a0, <i16 15, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1, i16 -1>
  %2 = extractelement <8 x i16> %1, i32 0
  %3 = sext i16 %2 to i32
  ret i32 %3
}
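; The extracted element is masked to 16 bits, so it is known to fit in a
; non-negative i64 and the uitofp can lower to a plain signed vcvtsi2ssq.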
define float @knownbits_mask_extract_uitofp(<2 x i64> %a0) nounwind {
; X32-LABEL: knownbits_mask_extract_uitofp:
; X32:       # BB#0:
; X32-NEXT:    pushl %ebp
; X32-NEXT:    movl %esp, %ebp
; X32-NEXT:    andl $-8, %esp
; X32-NEXT:    subl $16, %esp
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
; X32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
; X32-NEXT:    fildll {{[0-9]+}}(%esp)
; X32-NEXT:    fstps {{[0-9]+}}(%esp)
; X32-NEXT:    flds {{[0-9]+}}(%esp)
; X32-NEXT:    movl %ebp, %esp
; X32-NEXT:    popl %ebp
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_extract_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3],xmm0[4,5,6,7]
; X64-NEXT:    vmovq %xmm0, %rax
; X64-NEXT:    vcvtsi2ssq %rax, %xmm2, %xmm0
; X64-NEXT:    retq
  %1 = and <2 x i64> %a0, <i64 65535, i64 -1>
  %2 = extractelement <2 x i64> %1, i32 0
  %3 = uitofp i64 %2 to float
  ret float %3
}
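; The shuffled lanes are masked to 4 bits, so the sext can be emitted as a
; zero-extend (vpunpckhwd against zero) instead of a real sign extension.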
define <4 x i32> @knownbits_mask_shuffle_sext(<8 x i16> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_sext:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X32-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_shuffle_sext:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpxor %xmm1, %xmm1, %xmm1
; X64-NEXT:    vpunpckhwd {{.*#+}} xmm0 = xmm0[4],xmm1[4],xmm0[5],xmm1[5],xmm0[6],xmm1[6],xmm0[7],xmm1[7]
; X64-NEXT:    retq
  %1 = and <8 x i16> %a0, <i16 -1, i16 -1, i16 -1, i16 -1, i16 15, i16 15, i16 15, i16 15>
  %2 = shufflevector <8 x i16> %1, <8 x i16> undef, <4 x i32> <i32 4, i32 5, i32 6, i32 7>
  %3 = sext <4 x i16> %2 to <4 x i32>
  ret <4 x i32> %3
}
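; Lanes 2 and 3 are masked to small non-negative values, so the uitofp of the
; shuffled lanes can use the signed vcvtdq2ps path.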
define <4 x float> @knownbits_mask_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = shufflevector <4 x i32> %1, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %3 = uitofp <4 x i32> %2 to <4 x float>
  ret <4 x float> %3
}
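; The or only sets bits below bit 16, so the known-zero upper bits survive and
; the uitofp still lowers to vcvtdq2ps.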
define <4 x float> @knownbits_mask_or_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_or_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpor {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_or_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vcvtdq2ps %xmm0, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = or <4 x i32> %1, <i32 65535, i32 65535, i32 65535, i32 65535>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %4 = uitofp <4 x i32> %3 to <4 x float>
  ret <4 x float> %4
}
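; The xor also leaves bits 16-31 known zero, but these checks still expect the
; generic unsigned uitofp expansion, i.e. the xor case is not yet simplified to
; vcvtdq2ps.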
define <4 x float> @knownbits_mask_xor_shuffle_uitofp(<4 x i32> %a0) nounwind {
; X32-LABEL: knownbits_mask_xor_shuffle_uitofp:
; X32:       # BB#0:
; X32-NEXT:    vpand {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpxor {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X32-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X32-NEXT:    vpsrld $16, %xmm0, %xmm0
; X32-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X32-NEXT:    vaddps {{\.LCPI.*}}, %xmm0, %xmm0
; X32-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; X32-NEXT:    retl
;
; X64-LABEL: knownbits_mask_xor_shuffle_uitofp:
; X64:       # BB#0:
; X64-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpxor {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vpshufd {{.*#+}} xmm0 = xmm0[2,2,3,3]
; X64-NEXT:    vpblendw {{.*#+}} xmm1 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X64-NEXT:    vpsrld $16, %xmm0, %xmm0
; X64-NEXT:    vpblendw {{.*#+}} xmm0 = xmm0[0],mem[1],xmm0[2],mem[3],xmm0[4],mem[5],xmm0[6],mem[7]
; X64-NEXT:    vaddps {{.*}}(%rip), %xmm0, %xmm0
; X64-NEXT:    vaddps %xmm0, %xmm1, %xmm0
; X64-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 -1, i32 -1, i32 255, i32 4085>
  %2 = xor <4 x i32> %1, <i32 65535, i32 65535, i32 65535, i32 65535>
  %3 = shufflevector <4 x i32> %2, <4 x i32> undef, <4 x i32> <i32 2, i32 2, i32 3, i32 3>
  %4 = uitofp <4 x i32> %3 to <4 x float>
  ret <4 x float> %4
}