; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
; Implementing vld / vst as REG_SEQUENCE eliminates the extra vmov's.
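;
; NEON structure loads and stores (vld2/vld3/vld4, vst2/vst3/vst4) produce and
; consume tuples of consecutive D registers. Modeling those tuples with
; REG_SEQUENCE lets the register allocator place each sub-vector directly into
; the required consecutive registers instead of copying it there with vmov.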

%struct.int16x8_t = type { <8 x i16> }
%struct.int32x4_t = type { <4 x i32> }
%struct.__neon_int8x8x2_t = type { <8 x i8>, <8 x i8> }
%struct.__neon_int8x8x3_t = type { <8 x i8>, <8 x i8>, <8 x i8> }
%struct.__neon_int16x8x2_t = type { <8 x i16>, <8 x i16> }
%struct.__neon_int32x4x2_t = type { <4 x i32>, <4 x i32> }

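; @t1 below corresponds roughly to this NEON intrinsic code (an illustrative
; sketch only, not part of the test; vT0/vT1 stand for the vectors loaded from
; vT0ptr/vT1ptr):
;   int16x8_t in = vld1q_s16(i_ptr);
;   int32x4_t lo = vmovl_s16(vget_low_s16(in));
;   int32x4_t hi = vmovl_s16(vget_high_s16(in));
;   int16x4_t r0 = vshrn_n_s32(vmulq_s32(vT0, lo), 12);
;   int16x4_t r1 = vshrn_n_s32(vmulq_s32(vT1, hi), 12);
;   vst1q_s16(o_ptr, vcombine_s16(r0, r1));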
define arm_apcscc void @t1(i16* %i_ptr, i16* %o_ptr, %struct.int32x4_t* nocapture %vT0ptr, %struct.int32x4_t* nocapture %vT1ptr) nounwind {
entry:
; CHECK: t1:
; CHECK: vld1.16
; CHECK-NOT: vmov d
; CHECK: vmovl.s16
; CHECK: vshrn.i32
; CHECK: vshrn.i32
; CHECK-NOT: vmov d
; CHECK-NEXT: vst1.16
  %0 = getelementptr inbounds %struct.int32x4_t* %vT0ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
  %1 = load <4 x i32>* %0, align 16 ; <<4 x i32>> [#uses=1]
  %2 = getelementptr inbounds %struct.int32x4_t* %vT1ptr, i32 0, i32 0 ; <<4 x i32>*> [#uses=1]
  %3 = load <4 x i32>* %2, align 16 ; <<4 x i32>> [#uses=1]
  %4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4) ; <<8 x i16>> [#uses=1]
  %6 = bitcast <8 x i16> %5 to <2 x double> ; <<2 x double>> [#uses=2]
  %7 = extractelement <2 x double> %6, i32 0 ; <double> [#uses=1]
  %8 = bitcast double %7 to <4 x i16> ; <<4 x i16>> [#uses=1]
  %9 = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %8) ; <<4 x i32>> [#uses=1]
  %10 = extractelement <2 x double> %6, i32 1 ; <double> [#uses=1]
  %11 = bitcast double %10 to <4 x i16> ; <<4 x i16>> [#uses=1]
  %12 = tail call <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16> %11) ; <<4 x i32>> [#uses=1]
  %13 = mul <4 x i32> %1, %9 ; <<4 x i32>> [#uses=1]
  %14 = mul <4 x i32> %3, %12 ; <<4 x i32>> [#uses=1]
  %15 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %13, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
  %16 = tail call <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32> %14, <4 x i32> <i32 -12, i32 -12, i32 -12, i32 -12>) ; <<4 x i16>> [#uses=1]
  %17 = shufflevector <4 x i16> %15, <4 x i16> %16, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7> ; <<8 x i16>> [#uses=1]
  %18 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
  tail call void @llvm.arm.neon.vst1.v8i16(i8* %18, <8 x i16> %17)
  ret void
}

define arm_apcscc void @t2(i16* %i_ptr, i16* %o_ptr, %struct.int16x8_t* nocapture %vT0ptr, %struct.int16x8_t* nocapture %vT1ptr) nounwind {
entry:
; CHECK: t2:
; CHECK: vld1.16
; CHECK-NOT: vmov
; CHECK: vmul.i16
; CHECK: vld1.16
; CHECK: vst1.16
; CHECK-NOT: vmov
; CHECK: vmul.i16
; CHECK: vst1.16
  %0 = getelementptr inbounds %struct.int16x8_t* %vT0ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
  %1 = load <8 x i16>* %0, align 16 ; <<8 x i16>> [#uses=1]
  %2 = getelementptr inbounds %struct.int16x8_t* %vT1ptr, i32 0, i32 0 ; <<8 x i16>*> [#uses=1]
  %3 = load <8 x i16>* %2, align 16 ; <<8 x i16>> [#uses=1]
  %4 = bitcast i16* %i_ptr to i8* ; <i8*> [#uses=1]
  %5 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %4) ; <<8 x i16>> [#uses=1]
  %6 = getelementptr inbounds i16* %i_ptr, i32 8 ; <i16*> [#uses=1]
  %7 = bitcast i16* %6 to i8* ; <i8*> [#uses=1]
  %8 = tail call <8 x i16> @llvm.arm.neon.vld1.v8i16(i8* %7) ; <<8 x i16>> [#uses=1]
  %9 = mul <8 x i16> %1, %5 ; <<8 x i16>> [#uses=1]
  %10 = mul <8 x i16> %3, %8 ; <<8 x i16>> [#uses=1]
  %11 = bitcast i16* %o_ptr to i8* ; <i8*> [#uses=1]
  tail call void @llvm.arm.neon.vst1.v8i16(i8* %11, <8 x i16> %9)
  %12 = getelementptr inbounds i16* %o_ptr, i32 8 ; <i16*> [#uses=1]
  %13 = bitcast i16* %12 to i8* ; <i8*> [#uses=1]
  tail call void @llvm.arm.neon.vst1.v8i16(i8* %13, <8 x i16> %10)
  ret void
}

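; @t3 below corresponds roughly to this NEON intrinsic code (an illustrative
; sketch only, not part of the test):
;   int8x8x3_t v = vld3_s8(A);
;   int8x8x3_t r = {{ vsub_s8(v.val[2], v.val[1]),
;                     vadd_s8(v.val[0], v.val[2]),
;                     vmul_s8(v.val[1], v.val[0]) }};
;   vst3_s8(B, r);
;   return v.val[1];
; With vld3/vst3 modeled as REG_SEQUENCE, the three sub-vectors can be
; allocated into a consecutive D-register triple with no extra vmov.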
define <8 x i8> @t3(i8* %A, i8* %B) nounwind {
; CHECK: t3:
; CHECK: vld3.8
; CHECK: vmul.i8
; CHECK-NOT: vmov
; CHECK: vst3.8
  %tmp1 = call %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8* %A) ; <%struct.__neon_int8x8x3_t> [#uses=2]
  %tmp2 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 0 ; <<8 x i8>> [#uses=1]
  %tmp3 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 2 ; <<8 x i8>> [#uses=1]
  %tmp4 = extractvalue %struct.__neon_int8x8x3_t %tmp1, 1 ; <<8 x i8>> [#uses=1]
  %tmp5 = sub <8 x i8> %tmp3, %tmp4
  %tmp6 = add <8 x i8> %tmp2, %tmp3 ; <<8 x i8>> [#uses=1]
  %tmp7 = mul <8 x i8> %tmp4, %tmp2
  tail call void @llvm.arm.neon.vst3.v8i8(i8* %B, <8 x i8> %tmp5, <8 x i8> %tmp6, <8 x i8> %tmp7)
  ret <8 x i8> %tmp4
}

define arm_apcscc void @t4(i32* %in, i32* %out) nounwind {
entry:
; CHECK: t4:
; CHECK: vld2.32
; CHECK-NOT: vmov
; CHECK: vld2.32
; CHECK-NOT: vmov
; CHECK: bne
  %tmp1 = bitcast i32* %in to i8* ; <i8*> [#uses=1]
  %tmp2 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp1) ; <%struct.__neon_int32x4x2_t> [#uses=2]
  %tmp3 = getelementptr inbounds i32* %in, i32 8 ; <i32*> [#uses=1]
  %tmp4 = bitcast i32* %tmp3 to i8* ; <i8*> [#uses=1]
  %tmp5 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %tmp4) ; <%struct.__neon_int32x4x2_t> [#uses=2]
  %tmp8 = bitcast i32* %out to i8* ; <i8*> [#uses=1]
  br i1 undef, label %return1, label %return2

return1:
; CHECK: %return1
; CHECK-NOT: vmov
; CHECK-NEXT: vadd.i32
; CHECK-NEXT: vadd.i32
; CHECK-NEXT: vst2.32
  %tmp52 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
  %tmp57 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 1 ; <<4 x i32>> [#uses=1]
  %tmp = extractvalue %struct.__neon_int32x4x2_t %tmp5, 0 ; <<4 x i32>> [#uses=1]
  %tmp39 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
  %tmp6 = add <4 x i32> %tmp52, %tmp ; <<4 x i32>> [#uses=1]
  %tmp7 = add <4 x i32> %tmp57, %tmp39 ; <<4 x i32>> [#uses=1]
  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp6, <4 x i32> %tmp7)
  ret void

return2:
; CHECK: %return2
; CHECK: vadd.i32
; CHECK: vmov q1, q3
; CHECK-NOT: vmov
; CHECK: vst2.32 {d0, d1, d2, d3}
  %tmp100 = extractvalue %struct.__neon_int32x4x2_t %tmp2, 0 ; <<4 x i32>> [#uses=1]
  %tmp101 = extractvalue %struct.__neon_int32x4x2_t %tmp5, 1 ; <<4 x i32>> [#uses=1]
  %tmp102 = add <4 x i32> %tmp100, %tmp101 ; <<4 x i32>> [#uses=1]
  tail call void @llvm.arm.neon.vst2.v4i32(i8* %tmp8, <4 x i32> %tmp102, <4 x i32> %tmp101)
  call void @llvm.trap()
  unreachable
}

define <8 x i16> @t5(i16* %A, <8 x i16>* %B) nounwind {
; CHECK: t5:
; CHECK: vldmia
; CHECK: vmov q1, q0
; CHECK-NOT: vmov
; CHECK: vld2.16 {d0[1], d2[1]}, [r0]
; CHECK-NOT: vmov
; CHECK: vadd.i16
  %tmp0 = bitcast i16* %A to i8* ; <i8*> [#uses=1]
  %tmp1 = load <8 x i16>* %B ; <<8 x i16>> [#uses=2]
  %tmp2 = call %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8* %tmp0, <8 x i16> %tmp1, <8 x i16> %tmp1, i32 1) ; <%struct.__neon_int16x8x2_t> [#uses=2]
  %tmp3 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 0 ; <<8 x i16>> [#uses=1]
  %tmp4 = extractvalue %struct.__neon_int16x8x2_t %tmp2, 1 ; <<8 x i16>> [#uses=1]
  %tmp5 = add <8 x i16> %tmp3, %tmp4 ; <<8 x i16>> [#uses=1]
  ret <8 x i16> %tmp5
}

define <8 x i8> @t6(i8* %A, <8 x i8>* %B) nounwind {
; CHECK: t6:
; CHECK: vldr.64
; CHECK: vmov d1, d0
; CHECK-NEXT: vld2.8 {d0[1], d1[1]}
  %tmp1 = load <8 x i8>* %B ; <<8 x i8>> [#uses=2]
  %tmp2 = call %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8* %A, <8 x i8> %tmp1, <8 x i8> %tmp1, i32 1) ; <%struct.__neon_int8x8x2_t> [#uses=2]
  %tmp3 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 0 ; <<8 x i8>> [#uses=1]
  %tmp4 = extractvalue %struct.__neon_int8x8x2_t %tmp2, 1 ; <<8 x i8>> [#uses=1]
  %tmp5 = add <8 x i8> %tmp3, %tmp4 ; <<8 x i8>> [#uses=1]
  ret <8 x i8> %tmp5
}

define arm_apcscc void @t7(i32* %iptr, i32* %optr) nounwind {
entry:
; CHECK: t7:
; CHECK: vld2.32
; CHECK: vst2.32
; CHECK: vld1.32 {d0, d1},
; CHECK: vmov q1, q0
; CHECK-NOT: vmov
; CHECK: vuzp.32 q0, q1
; CHECK: vst1.32
  %0 = bitcast i32* %iptr to i8* ; <i8*> [#uses=2]
  %1 = tail call %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8* %0) ; <%struct.__neon_int32x4x2_t> [#uses=2]
  %tmp57 = extractvalue %struct.__neon_int32x4x2_t %1, 0 ; <<4 x i32>> [#uses=1]
  %tmp60 = extractvalue %struct.__neon_int32x4x2_t %1, 1 ; <<4 x i32>> [#uses=1]
  %2 = bitcast i32* %optr to i8* ; <i8*> [#uses=2]
  tail call void @llvm.arm.neon.vst2.v4i32(i8* %2, <4 x i32> %tmp57, <4 x i32> %tmp60)
  %3 = tail call <4 x i32> @llvm.arm.neon.vld1.v4i32(i8* %0) ; <<4 x i32>> [#uses=1]
  %4 = shufflevector <4 x i32> %3, <4 x i32> undef, <4 x i32> <i32 0, i32 2, i32 0, i32 2> ; <<4 x i32>> [#uses=1]
  tail call void @llvm.arm.neon.vst1.v4i32(i8* %2, <4 x i32> %4)
  ret void
}

; PR7156
define arm_aapcs_vfpcc i32 @t8() nounwind {
; CHECK: t8:
; CHECK: vrsqrte.f32 q0, q0
bb.nph55.bb.nph55.split_crit_edge:
  br label %bb3

bb3: ; preds = %bb3, %bb.nph55.bb.nph55.split_crit_edge
  br i1 undef, label %bb5, label %bb3

bb5: ; preds = %bb3
  br label %bb.i25

bb.i25: ; preds = %bb.i25, %bb5
  %0 = shufflevector <2 x float> undef, <2 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %1 = call <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float> %0) nounwind ; <<4 x float>> [#uses=1]
  %2 = fmul <4 x float> %1, undef ; <<4 x float>> [#uses=1]
  %3 = fmul <4 x float> undef, %2 ; <<4 x float>> [#uses=1]
  %tmp26.i = bitcast <4 x float> %3 to <2 x double> ; <<2 x double>> [#uses=1]
  %4 = extractelement <2 x double> %tmp26.i, i32 0 ; <double> [#uses=1]
  %5 = bitcast double %4 to <2 x float> ; <<2 x float>> [#uses=1]
  %6 = extractelement <2 x float> %5, i32 1 ; <float> [#uses=1]
  store float %6, float* undef, align 4
  br i1 undef, label %bb6, label %bb.i25

bb6: ; preds = %bb.i25
  br i1 undef, label %bb7, label %bb14

bb7: ; preds = %bb6
  br label %bb.i49

bb.i49: ; preds = %bb.i49, %bb7
  br i1 undef, label %bb.i19, label %bb.i49

bb.i19: ; preds = %bb.i19, %bb.i49
  br i1 undef, label %exit, label %bb.i19

exit: ; preds = %bb.i19
  unreachable

bb14: ; preds = %bb6
  ret i32 0
}

declare <4 x i32> @llvm.arm.neon.vld1.v4i32(i8*) nounwind readonly

declare <8 x i16> @llvm.arm.neon.vld1.v8i16(i8*) nounwind readonly

declare <4 x i32> @llvm.arm.neon.vmovls.v4i32(<4 x i16>) nounwind readnone

declare <4 x i16> @llvm.arm.neon.vshiftn.v4i16(<4 x i32>, <4 x i32>) nounwind readnone

declare void @llvm.arm.neon.vst1.v4i32(i8*, <4 x i32>) nounwind

declare void @llvm.arm.neon.vst1.v8i16(i8*, <8 x i16>) nounwind

declare void @llvm.arm.neon.vst3.v8i8(i8*, <8 x i8>, <8 x i8>, <8 x i8>) nounwind

declare %struct.__neon_int8x8x3_t @llvm.arm.neon.vld3.v8i8(i8*) nounwind readonly

declare %struct.__neon_int32x4x2_t @llvm.arm.neon.vld2.v4i32(i8*) nounwind readonly

declare %struct.__neon_int8x8x2_t @llvm.arm.neon.vld2lane.v8i8(i8*, <8 x i8>, <8 x i8>, i32) nounwind readonly

declare %struct.__neon_int16x8x2_t @llvm.arm.neon.vld2lane.v8i16(i8*, <8 x i16>, <8 x i16>, i32) nounwind readonly

declare void @llvm.arm.neon.vst2.v4i32(i8*, <4 x i32>, <4 x i32>) nounwind

declare <4 x float> @llvm.arm.neon.vrsqrte.v4f32(<4 x float>) nounwind readnone

declare void @llvm.trap() nounwind