; These are tests for SSE3 codegen. Yonah has SSE3 and earlier but not SSSE3+.

; RUN: llc < %s -march=x86-64 -mcpu=yonah -mtriple=i686-apple-darwin9 \
; RUN:   | FileCheck %s --check-prefix=X64

; Test for v8xi16 lowering where we extract the first element of the vector and
; place it in the second element of the result.
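; (In a shufflevector mask, indices 0-7 select from the first operand and
; 8-15 from the second, so lane 0 of the result below is the i16 0 constant
; and lane 1 is old[0].)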

define void @t0(<8 x i16>* %dest, <8 x i16>* %old) nounwind {
entry:
	%tmp3 = load <8 x i16>* %old
	%tmp6 = shufflevector <8 x i16> %tmp3,
		<8 x i16> < i16 0, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef, i16 undef >,
		<8 x i32> < i32 8, i32 0, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef >
	store <8 x i16> %tmp6, <8 x i16>* %dest
	ret void

; X64: t0:
; X64: movddup (%rsi), %xmm0
; X64: xorl %eax, %eax
; X64: pshuflw $0, %xmm0, %xmm0
; X64: pinsrw $0, %eax, %xmm0
; X64: movaps %xmm0, (%rdi)
; X64: ret
}

define <8 x i16> @t1(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> < i32 8, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
	ret <8 x i16> %tmp3

; X64: t1:
; X64: movl (%rsi), %eax
; X64: movaps (%rdi), %xmm0
; X64: pinsrw $0, %eax, %xmm0
; X64: ret
}

define <8 x i16> @t2(<8 x i16> %A, <8 x i16> %B) nounwind {
	%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 9, i32 1, i32 2, i32 9, i32 4, i32 5, i32 6, i32 7 >
	ret <8 x i16> %tmp
; X64: t2:
; X64: pextrw $1, %xmm1, %eax
; X64: pinsrw $0, %eax, %xmm0
; X64: pinsrw $3, %eax, %xmm0
; X64: ret
}

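; Note that in t3 both shuffle operands are %A, so mask indices 8 and 13
; select A[0] and A[5] respectively.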
define <8 x i16> @t3(<8 x i16> %A, <8 x i16> %B) nounwind {
	%tmp = shufflevector <8 x i16> %A, <8 x i16> %A, <8 x i32> < i32 8, i32 3, i32 2, i32 13, i32 7, i32 6, i32 5, i32 4 >
	ret <8 x i16> %tmp
; X64: t3:
; X64: pextrw $5, %xmm0, %eax
; X64: pshuflw $44, %xmm0, %xmm0
; X64: pshufhw $27, %xmm0, %xmm0
; X64: pinsrw $3, %eax, %xmm0
; X64: ret
}

define <8 x i16> @t4(<8 x i16> %A, <8 x i16> %B) nounwind {
	%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 7, i32 2, i32 3, i32 1, i32 5, i32 6, i32 5 >
	ret <8 x i16> %tmp
; X64: t4:
; X64: pextrw $7, %xmm0, %eax
; X64: pshufhw $100, %xmm0, %xmm1
; X64: pinsrw $1, %eax, %xmm1
; X64: pextrw $1, %xmm0, %eax
; X64: movaps %xmm1, %xmm0
; X64: pinsrw $4, %eax, %xmm0
; X64: ret
}

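; In dword terms t5 is [B0, A0, B1, A1]: movlhps packs the low qwords of %A
; and %B, and pshufd $114 (0x72, dword selectors 2,0,3,1) interleaves them.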
define <8 x i16> @t5(<8 x i16> %A, <8 x i16> %B) nounwind {
	%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 0, i32 1, i32 10, i32 11, i32 2, i32 3 >
	ret <8 x i16> %tmp
; X64: t5:
; X64: movlhps %xmm1, %xmm0
; X64: pshufd $114, %xmm0, %xmm0
; X64: ret
}

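; t6 replaces only words 0 and 1 (the low 32 bits), which is exactly the
; piece that a register-to-register movss copies.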
define <8 x i16> @t6(<8 x i16> %A, <8 x i16> %B) nounwind {
	%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 8, i32 9, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7 >
	ret <8 x i16> %tmp
; X64: t6:
; X64: movss %xmm1, %xmm0
; X64: ret
}

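; t7 is a pure in-register permute: pshuflw $-80 is 0xB0 (word selectors
; 0,0,3,2) and pshufhw $-56 is 0xC8 (selectors 0,2,0,3 within the high half).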
define <8 x i16> @t7(<8 x i16> %A, <8 x i16> %B) nounwind {
	%tmp = shufflevector <8 x i16> %A, <8 x i16> %B, <8 x i32> < i32 0, i32 0, i32 3, i32 2, i32 4, i32 6, i32 4, i32 7 >
	ret <8 x i16> %tmp
; X64: t7:
; X64: pshuflw $-80, %xmm0, %xmm0
; X64: pshufhw $-56, %xmm0, %xmm0
; X64: ret
}

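; The extract/insert chain in t8 amounts to the single word shuffle
; <2,1,0,3,6,5,4,7>; $-58 is 0xC6, whose 2-bit fields encode the selectors
; 2,1,0,3 used by both pshuflw and pshufhw.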
define void @t8(<2 x i64>* %res, <2 x i64>* %A) nounwind {
	%tmp = load <2 x i64>* %A
	%tmp.upgrd.1 = bitcast <2 x i64> %tmp to <8 x i16>
	%tmp0 = extractelement <8 x i16> %tmp.upgrd.1, i32 0
	%tmp1 = extractelement <8 x i16> %tmp.upgrd.1, i32 1
	%tmp2 = extractelement <8 x i16> %tmp.upgrd.1, i32 2
	%tmp3 = extractelement <8 x i16> %tmp.upgrd.1, i32 3
	%tmp4 = extractelement <8 x i16> %tmp.upgrd.1, i32 4
	%tmp5 = extractelement <8 x i16> %tmp.upgrd.1, i32 5
	%tmp6 = extractelement <8 x i16> %tmp.upgrd.1, i32 6
	%tmp7 = extractelement <8 x i16> %tmp.upgrd.1, i32 7
	%tmp8 = insertelement <8 x i16> undef, i16 %tmp2, i32 0
	%tmp9 = insertelement <8 x i16> %tmp8, i16 %tmp1, i32 1
	%tmp10 = insertelement <8 x i16> %tmp9, i16 %tmp0, i32 2
	%tmp11 = insertelement <8 x i16> %tmp10, i16 %tmp3, i32 3
	%tmp12 = insertelement <8 x i16> %tmp11, i16 %tmp6, i32 4
	%tmp13 = insertelement <8 x i16> %tmp12, i16 %tmp5, i32 5
	%tmp14 = insertelement <8 x i16> %tmp13, i16 %tmp4, i32 6
	%tmp15 = insertelement <8 x i16> %tmp14, i16 %tmp7, i32 7
	%tmp15.upgrd.2 = bitcast <8 x i16> %tmp15 to <2 x i64>
	store <2 x i64> %tmp15.upgrd.2, <2 x i64>* %res
	ret void
; X64: t8:
; X64: pshuflw $-58, (%rsi), %xmm0
; X64: pshufhw $-58, %xmm0, %xmm0
; X64: movaps %xmm0, (%rdi)
; X64: ret
}

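; t9 overwrites elements 2 and 3 of *%r with the two floats loaded (as one
; double) from %A.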
define void @t9(<4 x float>* %r, <2 x i32>* %A) nounwind {
	%tmp = load <4 x float>* %r
	%tmp.upgrd.3 = bitcast <2 x i32>* %A to double*
	%tmp.upgrd.4 = load double* %tmp.upgrd.3
	%tmp.upgrd.5 = insertelement <2 x double> undef, double %tmp.upgrd.4, i32 0
	%tmp5 = insertelement <2 x double> %tmp.upgrd.5, double undef, i32 1
	%tmp6 = bitcast <2 x double> %tmp5 to <4 x float>
	%tmp.upgrd.6 = extractelement <4 x float> %tmp, i32 0
	%tmp7 = extractelement <4 x float> %tmp, i32 1
	%tmp8 = extractelement <4 x float> %tmp6, i32 0
	%tmp9 = extractelement <4 x float> %tmp6, i32 1
	%tmp10 = insertelement <4 x float> undef, float %tmp.upgrd.6, i32 0
	%tmp11 = insertelement <4 x float> %tmp10, float %tmp7, i32 1
	%tmp12 = insertelement <4 x float> %tmp11, float %tmp8, i32 2
	%tmp13 = insertelement <4 x float> %tmp12, float %tmp9, i32 3
	store <4 x float> %tmp13, <4 x float>* %r
	ret void
; X64: t9:
; X64: movsd (%rsi), %xmm0
; X64: movhps %xmm0, (%rdi)
; X64: ret
}



; FIXME: This testcase produces icky code. It can be made much better!
; PR2585
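;
; t10 truncates each i32 element of @g1 to i16 (the low word of each dword on
; little-endian x86) and stores the resulting <4 x i16> to @g2.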

@g1 = external constant <4 x i32>
@g2 = external constant <4 x i16>

define internal void @t10() nounwind {
	load <4 x i32>* @g1, align 16
	bitcast <4 x i32> %1 to <8 x i16>
	shufflevector <8 x i16> %2, <8 x i16> undef, <8 x i32> < i32 0, i32 2, i32 4, i32 6, i32 undef, i32 undef, i32 undef, i32 undef >
	bitcast <8 x i16> %3 to <2 x i64>
	extractelement <2 x i64> %4, i32 0
	bitcast i64 %5 to <4 x i16>
	store <4 x i16> %6, <4 x i16>* @g2, align 8
	ret void
; X64: t10:
; X64: pextrw $4, %xmm0, %eax
; X64: pextrw $6, %xmm0, %edx
; X64: movlhps %xmm1, %xmm1
; X64: pshuflw $8, %xmm1, %xmm1
; X64: pinsrw $2, %eax, %xmm1
; X64: pinsrw $3, %edx, %xmm1
}


; Pack various elements via shuffles.
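; t11 needs only T0[1] in lane 0 and T1[0] in lane 1; every other lane of the
; result is undef.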
define <8 x i16> @t11(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
entry:
	%tmp7 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 1, i32 8, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
	ret <8 x i16> %tmp7

; X64: t11:
; X64: movlhps %xmm0, %xmm0
; X64: movd %xmm1, %eax
; X64: pshuflw $1, %xmm0, %xmm0
; X64: pinsrw $1, %eax, %xmm0
; X64: ret
}


define <8 x i16> @t12(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
entry:
	%tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 0, i32 1, i32 undef, i32 undef, i32 3, i32 11, i32 undef , i32 undef >
	ret <8 x i16> %tmp9

; X64: t12:
; X64: movlhps %xmm0, %xmm0
; X64: pextrw $3, %xmm1, %eax
; X64: pshufhw $3, %xmm0, %xmm0
; X64: pinsrw $5, %eax, %xmm0
; X64: ret
}

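; In t13 punpcklqdq %xmm0, %xmm1 concatenates the low qwords with %T1's in
; the low half, so words 0-3 hold T1[0..3] and words 4-7 hold T0[0..3].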
define <8 x i16> @t13(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
entry:
	%tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 11, i32 3, i32 undef , i32 undef >
	ret <8 x i16> %tmp9
; X64: t13:
; X64: punpcklqdq %xmm0, %xmm1
; X64: pextrw $3, %xmm1, %eax
; X64: pshufd $52, %xmm1, %xmm0
; X64: pinsrw $4, %eax, %xmm0
; X64: ret
}


define <8 x i16> @t14(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
entry:
	%tmp9 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 8, i32 9, i32 undef, i32 undef, i32 undef, i32 2, i32 undef , i32 undef >
	ret <8 x i16> %tmp9
; X64: t14:
; X64: punpcklqdq %xmm0, %xmm1
; X64: pshufhw $8, %xmm1, %xmm0
; X64: ret
}

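; t15 needs T0[7] in lane 2, T0[2] in lane 3 and T1[0] in lane 4: after the
; punpcklqdq, pshuflw $-128 (0x80, selectors 0,0,0,2) places T0[2], and the
; pinsrw fills lane 2.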
define <8 x i16> @t15(<8 x i16> %T0, <8 x i16> %T1) nounwind readnone {
entry:
	%tmp8 = shufflevector <8 x i16> %T0, <8 x i16> %T1, <8 x i32> < i32 undef, i32 undef, i32 7, i32 2, i32 8, i32 undef, i32 undef , i32 undef >
	ret <8 x i16> %tmp8
; X64: t15:
; X64: pextrw $7, %xmm0, %eax
; X64: punpcklqdq %xmm1, %xmm0
; X64: pshuflw $-128, %xmm0, %xmm0
; X64: pinsrw $2, %eax, %xmm0
; X64: ret
}


; Test Yonah, where we convert a shuffle to pextrw and pinsrw.
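; Yonah has no SSSE3 pshufb, so byte shuffles like the ones below are
; lowered with word-granularity pextrw/pinsrw sequences instead.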
define <16 x i8> @t16(<16 x i8> %T0) nounwind readnone {
entry:
	%tmp8 = shufflevector <16 x i8> <i8 0, i8 0, i8 0, i8 0, i8 1, i8 1, i8 1, i8 1, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 16, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
	%tmp9 = shufflevector <16 x i8> %tmp8, <16 x i8> %T0, <16 x i32> < i32 0, i32 1, i32 2, i32 17, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef, i32 undef , i32 undef >
	ret <16 x i8> %tmp9
; X64: t16:
; X64: pinsrw $0, %eax, %xmm1
; X64: pextrw $8, %xmm0, %eax
; X64: pinsrw $1, %eax, %xmm1
; X64: pextrw $1, %xmm1, %ecx
; X64: movd %xmm1, %edx
; X64: pinsrw $0, %edx, %xmm1
; X64: pinsrw $1, %eax, %xmm0
; X64: ret
}