blob: 5a52403394ae91160e366445e2ec841022a2cdb7 [file] [log] [blame]
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -x86-experimental-vector-shuffle-lowering | FileCheck %s --check-prefix=CHECK-SSE2
;
; Verify that the DAG combiner correctly folds bitwise operations across
; shuffles, nested shuffles with undef, pairs of nested shuffles, and other
; basic and always-safe patterns. Also test that the DAG combiner will combine
; target-specific shuffle instructions where reasonable.

target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-unknown-unknown"

; Declarations of the SSE2 shuffle intrinsics exercised below.
declare <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32>, i8)
declare <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16>, i8)
declare <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16>, i8)

; Mask 27 (0b00011011) reverses the four dwords; applying it twice is the
; identity, so the combiner should delete both shuffles (only retq remains).
define <4 x i32> @combine_pshufd1(<4 x i32> %a) {
; CHECK-SSE2-LABEL: @combine_pshufd1
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: retq
  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
  %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 27)
  ret <4 x i32> %c
}

; The inner pshuflw mask -28 (0xE4) is the identity word selector, and the
; two reversing pshufd shuffles cancel, so the whole chain folds away.
define <4 x i32> @combine_pshufd2(<4 x i32> %a) {
; CHECK-SSE2-LABEL: @combine_pshufd2
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: retq
  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
  %b.cast = bitcast <4 x i32> %b to <8 x i16>
  %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 -28)
  %c.cast = bitcast <8 x i16> %c to <4 x i32>
  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
  ret <4 x i32> %d
}

; Same as combine_pshufd2 but with an identity pshufhw (0xE4) in the middle;
; everything should fold to nothing.
define <4 x i32> @combine_pshufd3(<4 x i32> %a) {
; CHECK-SSE2-LABEL: @combine_pshufd3
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: retq
  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 27)
  %b.cast = bitcast <4 x i32> %b to <8 x i16>
  %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 -28)
  %c.cast = bitcast <8 x i16> %c to <4 x i32>
  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 27)
  ret <4 x i32> %d
}

; The outer pshufd mask -31 (0xE1) swaps the low two dwords and is its own
; inverse, so the pair cancels and only the high-word-reversing pshufhw
; (mask 27) should survive.
define <4 x i32> @combine_pshufd4(<4 x i32> %a) {
; CHECK-SSE2-LABEL: @combine_pshufd4
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
; CHECK-SSE2-NEXT: retq
  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -31)
  %b.cast = bitcast <4 x i32> %b to <8 x i16>
  %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b.cast, i8 27)
  %c.cast = bitcast <8 x i16> %c to <4 x i32>
  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -31)
  ret <4 x i32> %d
}

; The outer pshufd mask -76 (0xB4) swaps the high two dwords and is its own
; inverse, so only the low-word-reversing pshuflw (mask 27) should remain.
define <4 x i32> @combine_pshufd5(<4 x i32> %a) {
; CHECK-SSE2-LABEL: @combine_pshufd5
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
; CHECK-SSE2-NEXT: retq
  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 -76)
  %b.cast = bitcast <4 x i32> %b to <8 x i16>
  %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b.cast, i8 27)
  %c.cast = bitcast <8 x i16> %c to <4 x i32>
  %d = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %c.cast, i8 -76)
  ret <4 x i32> %d
}

; A splat of element 0 (mask 0) followed by any pshufd still yields a splat
; of element 0, so the two shuffles should combine into a single pshufd $0.
define <4 x i32> @combine_pshufd6(<4 x i32> %a) {
; CHECK-SSE2-LABEL: @combine_pshufd6
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: pshufd $0
; CHECK-SSE2-NEXT: retq
  %b = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %a, i8 0)
  %c = call <4 x i32> @llvm.x86.sse2.pshuf.d(<4 x i32> %b, i8 8)
  ret <4 x i32> %c
}

; pshuflw mask 27 reverses the low four words; applying it twice is the
; identity and should fold to nothing.
define <8 x i16> @combine_pshuflw1(<8 x i16> %a) {
; CHECK-SSE2-LABEL: @combine_pshuflw1
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: retq
  %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
  %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
  ret <8 x i16> %c
}

; The pshufhw mask -28 (0xE4) is the identity selector and the pshuflw pair
; cancels, so the whole chain should fold away.
define <8 x i16> @combine_pshuflw2(<8 x i16> %a) {
; CHECK-SSE2-LABEL: @combine_pshuflw2
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: retq
  %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
  %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 -28)
  %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
  ret <8 x i16> %d
}

; The two pshuflw shuffles (mask 27, self-inverse) cancel around the pshufhw,
; so only the high-word-reversing pshufhw should remain.
define <8 x i16> @combine_pshuflw3(<8 x i16> %a) {
; CHECK-SSE2-LABEL: @combine_pshuflw3
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: pshufhw {{.*}} # xmm0 = xmm0[0,1,2,3,7,6,5,4]
; CHECK-SSE2-NEXT: retq
  %b = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %a, i8 27)
  %c = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %b, i8 27)
  %d = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %c, i8 27)
  ret <8 x i16> %d
}

; Mirror of combine_pshuflw3: the two pshufhw shuffles (mask 27, self-inverse)
; cancel, leaving only the low-word-reversing pshuflw.
define <8 x i16> @combine_pshufhw1(<8 x i16> %a) {
; CHECK-SSE2-LABEL: @combine_pshufhw1
; CHECK-SSE2: # BB#0:
; CHECK-SSE2-NEXT: pshuflw {{.*}} # xmm0 = xmm0[3,2,1,0,4,5,6,7]
; CHECK-SSE2-NEXT: retq
  %b = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %a, i8 27)
  %c = call <8 x i16> @llvm.x86.sse2.pshufl.w(<8 x i16> %b, i8 27)
  %d = call <8 x i16> @llvm.x86.sse2.pshufh.w(<8 x i16> %c, i8 27)
  ret <8 x i16> %d
}
