; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefix=AVX --check-prefix=AVX512DQVL

; TODO - shuffle+sext are superfluous
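; (pmuldq only reads the even dword of each 64-bit lane and sign-extends it internally, so the pshufd+pmovsxdq pairs feeding it should be foldable.)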
define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_sext_pmuldq:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pmovsxdq %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE-NEXT: pmovsxdq %xmm0, %xmm0
; SSE-NEXT: pmuldq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_sext_pmuldq:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX-NEXT: vpmovsxdq %xmm1, %xmm1
; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %3 = sext <2 x i32> %1 to <2 x i64>
  %4 = sext <2 x i32> %2 to <2 x i64>
  %5 = mul nuw <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - shuffle+zext are superfluous
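; (pmuludq only reads the even dword of each 64-bit lane and zero-extends it internally, so the pshufd+pmovzxdq pairs feeding it should be foldable.)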
define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zext_pmuludq:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_zext_pmuludq:
; AVX: # %bb.0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %3 = zext <2 x i32> %1 to <2 x i64>
  %4 = zext <2 x i32> %2 to <2 x i64>
  %5 = mul nuw <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - blends are superfluous
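; (pmuludq ignores the upper dword of each 64-bit element, so zeroing it with a blend is unnecessary.)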
define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_shuffle_zero_pmuludq:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: combine_shuffle_zero_pmuludq:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512VL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQVL-LABEL: combine_shuffle_zero_pmuludq:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512DQVL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %3 = bitcast <4 x i32> %1 to <2 x i64>
  %4 = bitcast <4 x i32> %2 to <2 x i64>
  %5 = mul <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - blends are superfluous
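; (As above, for 256-bit vectors: pmuludq/vpmuludq ignore the odd dwords, so the zeroing blends should be removable.)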
define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq_256:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX512VL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQVL-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512DQVL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX512DQVL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX512DQVL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
  %1 = shufflevector <8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %2 = shufflevector <8 x i32> %a1, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %3 = bitcast <8 x i32> %1 to <4 x i64>
  %4 = bitcast <8 x i32> %2 to <4 x i64>
  %5 = mul <4 x i64> %3, %4
  ret <4 x i64> %5
}

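; Multiplying zero-extended i32 operands by a constant that fits in 32 bits should select pmuludq rather than a general 64-bit multiply.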
define <8 x i64> @combine_zext_pmuludq_256(<8 x i32> %a) {
; SSE-LABEL: combine_zext_pmuludq_256:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,0,1]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,0,1]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm4 = xmm2[0],zero,xmm2[1],zero
; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero
; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [715827883,715827883]
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: pmuludq %xmm1, %xmm2
; SSE-NEXT: pmuludq %xmm1, %xmm4
; SSE-NEXT: pmuludq %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm4, %xmm1
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_zext_pmuludq_256:
; AVX2: # %bb.0:
; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; AVX2-NEXT: vpbroadcastq {{.*#+}} ymm2 = [715827883,715827883,715827883,715827883]
; AVX2-NEXT: vpmuludq %ymm2, %ymm0, %ymm0
; AVX2-NEXT: vpmuludq %ymm2, %ymm1, %ymm1
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: combine_zext_pmuludq_256:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512VL-NEXT: vpmuludq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512VL-NEXT: retq
;
; AVX512DQVL-LABEL: combine_zext_pmuludq_256:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpmovzxdq {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero
; AVX512DQVL-NEXT: vpmuludq {{.*}}(%rip){1to8}, %zmm0, %zmm0
; AVX512DQVL-NEXT: retq
  %1 = zext <8 x i32> %a to <8 x i64>
  %2 = mul nuw nsw <8 x i64> %1, <i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883, i64 715827883>
  ret <8 x i64> %2
}