; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2

; fold (sdiv undef, x) -> 0
define <4 x i32> @combine_vec_sdiv_undef0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_undef0:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_undef0:
; AVX: # BB#0:
; AVX-NEXT: retq
  %1 = sdiv <4 x i32> undef, %x
  ret <4 x i32> %1
}

; fold (sdiv x, undef) -> undef
define <4 x i32> @combine_vec_sdiv_undef1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_undef1:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_undef1:
; AVX: # BB#0:
; AVX-NEXT: retq
  %1 = sdiv <4 x i32> %x, undef
  ret <4 x i32> %1
}

; fold (sdiv x, 1) -> x
define <4 x i32> @combine_vec_sdiv_by_one(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_one:
; SSE: # BB#0:
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_one:
; AVX: # BB#0:
; AVX-NEXT: retq
  %1 = sdiv <4 x i32> %x, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %1
}

; fold (sdiv x, -1) -> 0 - x
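; (there is no vector negate instruction in SSE/AVX, so the 0 - x lowers
; to a zero + subtract pair: pxor/psubd, or vpxor/vpsubd, checked below)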
define <4 x i32> @combine_vec_sdiv_by_negone(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_negone:
; SSE: # BB#0:
; SSE-NEXT: pxor %xmm1, %xmm1
; SSE-NEXT: psubd %xmm0, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_negone:
; AVX: # BB#0:
; AVX-NEXT: vpxor %xmm1, %xmm1, %xmm1
; AVX-NEXT: vpsubd %xmm0, %xmm1, %xmm0
; AVX-NEXT: retq
  %1 = sdiv <4 x i32> %x, <i32 -1, i32 -1, i32 -1, i32 -1>
  ret <4 x i32> %1
}

; fold (sdiv x, y) -> (udiv x, y) iff x and y are known non-negative
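; (masking with 255 clears the sign bits, so both operands are known
; non-negative and the sdiv by 4 becomes a plain logical shift right by 2)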
define <4 x i32> @combine_vec_sdiv_by_pos0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos0:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: psrld $2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pos0:
; AVX: # BB#0:
; AVX-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT: vpsrld $2, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = sdiv <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

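; same fold with a non-uniform power-of-2 divisor <1,4,8,16>: each lane
; becomes a logical shift by <0,2,3,4>; AVX2 can do this with one variable
; shift (vpsrlvd), older targets blend the per-shift-amount results together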
define <4 x i32> @combine_vec_sdiv_by_pos1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pos1:
; SSE: # BB#0:
; SSE-NEXT: pand {{.*}}(%rip), %xmm0
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrld $3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT: psrld $4, %xmm0
; SSE-NEXT: psrld $2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX1-LABEL: combine_vec_sdiv_by_pos1:
; AVX1: # BB#0:
; AVX1-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX1-NEXT: vpsrld $4, %xmm0, %xmm1
; AVX1-NEXT: vpsrld $2, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm1 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; AVX1-NEXT: vpsrld $3, %xmm0, %xmm2
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,3],xmm0[4,5],xmm1[6,7]
; AVX1-NEXT: retq
;
; AVX2-LABEL: combine_vec_sdiv_by_pos1:
; AVX2: # BB#0:
; AVX2-NEXT: vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-NEXT: retq
  %1 = and <4 x i32> %x, <i32 255, i32 255, i32 255, i32 255>
  %2 = sdiv <4 x i32> %1, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %2
}

; fold (sdiv x, (1 << c)) -> sra(x + srl(sra(x, 31), 32 - c), c),
; i.e. bias negative values before the arithmetic shift so the
; result rounds toward zero as sdiv requires
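; e.g. for c = 2: a plain x >>s 2 would give -7 >>s 2 = -2, but
; -7 sdiv 4 = -1; the bias srl(sra(-7, 31), 30) = 3 fixes this up:
; (-7 + 3) >>s 2 = -1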
define <4 x i32> @combine_vec_sdiv_by_pow2a(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2a:
; SSE: # BB#0:
; SSE-NEXT: movdqa %xmm0, %xmm1
; SSE-NEXT: psrad $31, %xmm1
; SSE-NEXT: psrld $30, %xmm1
; SSE-NEXT: paddd %xmm0, %xmm1
; SSE-NEXT: psrad $2, %xmm1
; SSE-NEXT: movdqa %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2a:
; AVX: # BB#0:
; AVX-NEXT: vpsrad $31, %xmm0, %xmm1
; AVX-NEXT: vpsrld $30, %xmm1, %xmm1
; AVX-NEXT: vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpsrad $2, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = sdiv <4 x i32> %x, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %1
}

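; with a distinct power-of-2 divisor per lane the combine currently
; scalarizes: each element is extracted, divided with the same sign-bias
; sequence (sarl/shrl/addl/sarl), and reinserted; lane 0 divides by 1
; and is left untouched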
define <4 x i32> @combine_vec_sdiv_by_pow2b(<4 x i32> %x) {
; SSE-LABEL: combine_vec_sdiv_by_pow2b:
; SSE: # BB#0:
; SSE-NEXT: pextrd $1, %xmm0, %eax
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: sarl $31, %ecx
; SSE-NEXT: shrl $30, %ecx
; SSE-NEXT: addl %eax, %ecx
; SSE-NEXT: sarl $2, %ecx
; SSE-NEXT: pextrd $2, %xmm0, %eax
; SSE-NEXT: pextrd $3, %xmm0, %edx
; SSE-NEXT: pinsrd $1, %ecx, %xmm0
; SSE-NEXT: movl %eax, %ecx
; SSE-NEXT: sarl $31, %ecx
; SSE-NEXT: shrl $29, %ecx
; SSE-NEXT: addl %eax, %ecx
; SSE-NEXT: sarl $3, %ecx
; SSE-NEXT: pinsrd $2, %ecx, %xmm0
; SSE-NEXT: movl %edx, %eax
; SSE-NEXT: sarl $31, %eax
; SSE-NEXT: shrl $28, %eax
; SSE-NEXT: addl %edx, %eax
; SSE-NEXT: sarl $4, %eax
; SSE-NEXT: pinsrd $3, %eax, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_vec_sdiv_by_pow2b:
; AVX: # BB#0:
; AVX-NEXT: vpextrd $1, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: sarl $31, %ecx
; AVX-NEXT: shrl $30, %ecx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: sarl $2, %ecx
; AVX-NEXT: vpinsrd $1, %ecx, %xmm0, %xmm1
; AVX-NEXT: vpextrd $2, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: sarl $31, %ecx
; AVX-NEXT: shrl $29, %ecx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: sarl $3, %ecx
; AVX-NEXT: vpinsrd $2, %ecx, %xmm1, %xmm1
; AVX-NEXT: vpextrd $3, %xmm0, %eax
; AVX-NEXT: movl %eax, %ecx
; AVX-NEXT: sarl $31, %ecx
; AVX-NEXT: shrl $28, %ecx
; AVX-NEXT: addl %eax, %ecx
; AVX-NEXT: sarl $4, %ecx
; AVX-NEXT: vpinsrd $3, %ecx, %xmm1, %xmm0
; AVX-NEXT: retq
  %1 = sdiv <4 x i32> %x, <i32 1, i32 4, i32 8, i32 16>
  ret <4 x i32> %1
}