; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefix=AVX

; TODO - shuffle+sext are superfluous
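; A hedged sketch of the fold this TODO asks for: pmuldq reads only the low
; dword of each qword lane (i.e. elements 0 and 2 of the <4 x i32> source)
; and sign-extends it internally, so the pshufd+pmovsxdq pairs should combine
; away, leaving a single instruction on both targets:
;   pmuldq %xmm1, %xmm0   # SSE (vpmuldq %xmm1, %xmm0, %xmm0 for AVX)
;   retq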
define <2 x i64> @combine_shuffle_sext_pmuldq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_sext_pmuldq:
; SSE: # BB#0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pmovsxdq %xmm0, %xmm2
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE-NEXT: pmovsxdq %xmm0, %xmm0
; SSE-NEXT: pmuldq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_sext_pmuldq:
; AVX: # BB#0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpmovsxdq %xmm0, %xmm0
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX-NEXT: vpmovsxdq %xmm1, %xmm1
; AVX-NEXT: vpmuldq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %3 = sext <2 x i32> %1 to <2 x i64>
  %4 = sext <2 x i32> %2 to <2 x i64>
  %5 = mul nuw <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - shuffle+zext are superfluous
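; A hedged sketch of the ideal codegen here: pmuludq zero-extends the low
; dword of each qword lane internally, so the pshufd+pmovzxdq pairs should
; fold away in the same manner:
;   pmuludq %xmm1, %xmm0   # SSE (vpmuludq %xmm1, %xmm0, %xmm0 for AVX)
;   retq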
define <2 x i64> @combine_shuffle_zext_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zext_pmuludq:
; SSE: # BB#0:
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[0,2,2,3]
; SSE-NEXT: pmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_zext_pmuludq:
; AVX: # BB#0:
; AVX-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero
; AVX-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> undef, <2 x i32> <i32 0, i32 2>
  %3 = zext <2 x i32> %1 to <2 x i64>
  %4 = zext <2 x i32> %2 to <2 x i64>
  %5 = mul nuw <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - blends are superfluous
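; A hedged sketch of the ideal codegen: pmuludq ignores the upper dword of
; each qword lane, so zeroing those dwords with pblendw/vpblendd is redundant
; and the whole sequence should reduce to the multiply alone:
;   pmuludq %xmm1, %xmm0   # SSE (vpmuludq %xmm1, %xmm0, %xmm0 for AVX)
;   retq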
define <2 x i64> @combine_shuffle_zero_pmuludq(<4 x i32> %a0, <4 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq:
; SSE: # BB#0:
; SSE-NEXT: pxor %xmm2, %xmm2
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,3],xmm1[4,5],xmm2[6,7]
; SSE-NEXT: pmuludq %xmm1, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_zero_pmuludq:
; AVX: # BB#0:
; AVX-NEXT: vpxor %xmm2, %xmm2, %xmm2
; AVX-NEXT: vpblendd {{.*#+}} xmm0 = xmm0[0],xmm2[1],xmm0[2],xmm2[3]
; AVX-NEXT: vpblendd {{.*#+}} xmm1 = xmm1[0],xmm2[1],xmm1[2],xmm2[3]
; AVX-NEXT: vpmuludq %xmm1, %xmm0, %xmm0
; AVX-NEXT: retq
  %1 = shufflevector <4 x i32> %a0, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %2 = shufflevector <4 x i32> %a1, <4 x i32> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 7>
  %3 = bitcast <4 x i32> %1 to <2 x i64>
  %4 = bitcast <4 x i32> %2 to <2 x i64>
  %5 = mul <2 x i64> %3, %4
  ret <2 x i64> %5
}

; TODO - blends are superfluous
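; A hedged sketch of the 256-bit case: as above, the blends are redundant, so
; this should reduce to two pmuludq ops on SSE (one per 128-bit half, since
; 256-bit vectors are split) and a single vpmuludq on AVX2:
;   vpmuludq %ymm1, %ymm0, %ymm0   # AVX2
;   retq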
define <4 x i64> @combine_shuffle_zero_pmuludq_256(<8 x i32> %a0, <8 x i32> %a1) {
; SSE-LABEL: combine_shuffle_zero_pmuludq_256:
; SSE: # BB#0:
; SSE-NEXT: pxor %xmm4, %xmm4
; SSE-NEXT: pblendw {{.*#+}} xmm1 = xmm1[0,1],xmm4[2,3],xmm1[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm4[2,3],xmm0[4,5],xmm4[6,7]
; SSE-NEXT: pblendw {{.*#+}} xmm3 = xmm3[0,1],xmm4[2,3],xmm3[4,5],xmm4[6,7]
; SSE-NEXT: pmuludq %xmm3, %xmm1
; SSE-NEXT: pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm4[2,3],xmm2[4,5],xmm4[6,7]
; SSE-NEXT: pmuludq %xmm2, %xmm0
; SSE-NEXT: retq
;
; AVX-LABEL: combine_shuffle_zero_pmuludq_256:
; AVX: # BB#0:
; AVX-NEXT: vpxor %ymm2, %ymm2, %ymm2
; AVX-NEXT: vpblendd {{.*#+}} ymm0 = ymm0[0],ymm2[1],ymm0[2],ymm2[3],ymm0[4],ymm2[5],ymm0[6],ymm2[7]
; AVX-NEXT: vpblendd {{.*#+}} ymm1 = ymm1[0],ymm2[1],ymm1[2],ymm2[3],ymm1[4],ymm2[5],ymm1[6],ymm2[7]
; AVX-NEXT: vpmuludq %ymm1, %ymm0, %ymm0
; AVX-NEXT: retq
  %1 = shufflevector <8 x i32> %a0, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %2 = shufflevector <8 x i32> %a1, <8 x i32> zeroinitializer, <8 x i32> <i32 0, i32 9, i32 2, i32 11, i32 4, i32 13, i32 6, i32 15>
  %3 = bitcast <8 x i32> %1 to <4 x i64>
  %4 = bitcast <8 x i32> %2 to <4 x i64>
  %5 = mul <4 x i64> %3, %4
  ret <4 x i64> %5
}