; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl | FileCheck %s --check-prefix=AVX --check-prefix=AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx512vl,+avx512dq | FileCheck %s --check-prefix=AVX --check-prefix=AVX512DQVL

; TODO - shuffle+sext are superfluous
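; (pmuldq sign-extends the low dword of each qword lane internally, so the
; shuffle+sext should fold away, leaving a single pmuldq of %a0 and %a1.)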
define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_sext_pmuldq:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pmovsxdq %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE-NEXT: pmovsxdq %xmm0, %xmm0
; SSE-NEXT: pmuldq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_shuffle_sext_pmuldq:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX2-NEXT: vpmovsxdq %xmm1, %xmm1
; AVX2-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: combine_shuffle_sext_pmuldq:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX512VL-NEXT: vpmovsxdq %xmm1, %xmm1
; AVX512VL-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQVL-LABEL: combine_shuffle_sext_pmuldq:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512DQVL-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX512DQVL-NEXT: vpmovsxdq %xmm1, %xmm1
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %3 = sext <2 x i32> %1 to <2 x i64>
  %4 = sext <2 x i32> %2 to <2 x i64>
  %5 = mul nuw <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - shuffle+zext are superfluous
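; (pmuludq zero-extends the low dword of each qword lane internally, so the
; shuffle+zext should likewise fold to a single pmuludq.)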
define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zext_pmuludq:
; SSE: # %bb.0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_shuffle_zext_pmuludq:
; AVX2: # %bb.0:
; AVX2-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: combine_shuffle_zext_pmuludq:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512VL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512VL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQVL-LABEL: combine_shuffle_zext_pmuludq:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX512DQVL-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX512DQVL-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX512DQVL-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %3 = zext <2 x i32> %1 to <2 x i64>
  %4 = zext <2 x i32> %2 to <2 x i64>
  %5 = mul nuw <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - blends are superfluous
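; (pmuludq only reads the low dword of each qword lane, so zeroing the odd
; dwords with a blend first should be unnecessary before a pmuludq.)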
define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_shuffle_zero_pmuludq:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX2-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX2-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: combine_shuffle_zero_pmuludq:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512VL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512VL-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX512VL-NEXT: retq
;
; AVX512DQVL-LABEL: combine_shuffle_zero_pmuludq:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX512DQVL-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX512DQVL-NEXT: vpmullq %xmm1, %xmm0, %xmm0
; AVX512DQVL-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %3 = bitcast <4 x i32> %1 to <2 x i64>
  %4 = bitcast <4 x i32> %2 to <2 x i64>
  %5 = mul <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - blends are superfluous
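; (as above, but on 256-bit vectors: the zeroing blends should fold away,
; leaving just the pmuludq/vpmuludq.)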
define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq_256:
; SSE: # %bb.0:
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX2-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX2: # %bb.0:
; AVX2-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX2-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX2-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX2-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512VL-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX512VL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX512VL-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX512VL-NEXT: retq
;
; AVX512DQVL-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX512DQVL: # %bb.0:
; AVX512DQVL-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX512DQVL-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX512DQVL-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX512DQVL-NEXT: vpmullq %ymm1, %ymm0, %ymm0
; AVX512DQVL-NEXT: retq
  %1 = shufflevector <8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %2 = shufflevector <8 x i32> %a1, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %3 = bitcast <8 x i32> %1 to <4 x i64>
  %4 = bitcast <8 x i32> %2 to <4 x i64>
  %5 = mul <4 x i64> %3, %4
  ret <4 x i64> %5
}