; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vst1\\.8} %t | count 2
; RUN: grep {vst1\\.16} %t | count 2
; RUN: grep {vst1\\.32} %t | count 4
; RUN: grep {vst1\\.64} %t | count 2

; Store one 64-bit vector of i8 lanes; should select vst1.8.
define void @vst1i8(i8* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %B
	call void @llvm.arm.neon.vsti.v8i8(i8* %A, <8 x i8> %tmp1, i32 1)
	ret void
}

; Store one 64-bit vector of i16 lanes; should select vst1.16.
define void @vst1i16(i16* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %B
	call void @llvm.arm.neon.vsti.v4i16(i16* %A, <4 x i16> %tmp1, i32 1)
	ret void
}

; Store one 64-bit vector of i32 lanes; should select vst1.32.
define void @vst1i32(i32* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %B
	call void @llvm.arm.neon.vsti.v2i32(i32* %A, <2 x i32> %tmp1, i32 1)
	ret void
}

; Store one 64-bit vector of f32 lanes; also counts toward vst1.32.
define void @vst1f(float* %A, <2 x float>* %B) nounwind {
	%tmp1 = load <2 x float>* %B
	call void @llvm.arm.neon.vstf.v2f32(float* %A, <2 x float> %tmp1, i32 1)
	ret void
}

; Store one 64-bit vector with a single i64 lane; should select vst1.64.
define void @vst1i64(i64* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %B
	call void @llvm.arm.neon.vsti.v1i64(i64* %A, <1 x i64> %tmp1, i32 1)
	ret void
}

; Store one 128-bit (Q-register) vector of i8 lanes; should select vst1.8.
define void @vst1Qi8(i8* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %B
	call void @llvm.arm.neon.vsti.v16i8(i8* %A, <16 x i8> %tmp1, i32 1)
	ret void
}

; Store one 128-bit (Q-register) vector of i16 lanes; should select vst1.16.
define void @vst1Qi16(i16* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %B
	call void @llvm.arm.neon.vsti.v8i16(i16* %A, <8 x i16> %tmp1, i32 1)
	ret void
}

; Store one 128-bit (Q-register) vector of i32 lanes; should select vst1.32.
define void @vst1Qi32(i32* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %B
	call void @llvm.arm.neon.vsti.v4i32(i32* %A, <4 x i32> %tmp1, i32 1)
	ret void
}

; Store one 128-bit (Q-register) vector of f32 lanes; also counts toward vst1.32.
define void @vst1Qf(float* %A, <4 x float>* %B) nounwind {
	%tmp1 = load <4 x float>* %B
	call void @llvm.arm.neon.vstf.v4f32(float* %A, <4 x float> %tmp1, i32 1)
	ret void
}

; Store one 128-bit (Q-register) vector of i64 lanes; should select vst1.64.
define void @vst1Qi64(i64* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %B
	call void @llvm.arm.neon.vsti.v2i64(i64* %A, <2 x i64> %tmp1, i32 1)
	ret void
}

; Intrinsic declarations for the 64-bit (D-register) stores.
; NOTE(review): these store intrinsics are declared readnone even though
; they write memory — presumably matching the era's IntrinsicsARM.td;
; confirm before tightening, since llc evidently still emits the stores.
declare void @llvm.arm.neon.vsti.v8i8(i8*, <8 x i8>, i32) nounwind readnone
declare void @llvm.arm.neon.vsti.v4i16(i16*, <4 x i16>, i32) nounwind readnone
declare void @llvm.arm.neon.vsti.v2i32(i32*, <2 x i32>, i32) nounwind readnone
declare void @llvm.arm.neon.vstf.v2f32(float*, <2 x float>, i32) nounwind readnone
declare void @llvm.arm.neon.vsti.v1i64(i64*, <1 x i64>, i32) nounwind readnone

; Intrinsic declarations for the 128-bit (Q-register) stores.
declare void @llvm.arm.neon.vsti.v16i8(i8*, <16 x i8>, i32) nounwind readnone
declare void @llvm.arm.neon.vsti.v8i16(i16*, <8 x i16>, i32) nounwind readnone
declare void @llvm.arm.neon.vsti.v4i32(i32*, <4 x i32>, i32) nounwind readnone
declare void @llvm.arm.neon.vstf.v4f32(float*, <4 x float>, i32) nounwind readnone
declare void @llvm.arm.neon.vsti.v2i64(i64*, <2 x i64>, i32) nounwind readnone