; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vmlal\\.s8} %t | count 1
; RUN: grep {vmlal\\.s16} %t | count 1
; RUN: grep {vmlal\\.s32} %t | count 1
; RUN: grep {vmlal\\.u8} %t | count 1
; RUN: grep {vmlal\\.u16} %t | count 1
; RUN: grep {vmlal\\.u32} %t | count 1

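; Signed variants: vmlal.s<size> sign-extends each pair of narrow elements,
; multiplies them, and accumulates the products into the wide result vector.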
define <8 x i16> @vmlals8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
	%tmp4 = call <8 x i16> @llvm.arm.neon.vmlals.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
	ret <8 x i16> %tmp4
}

define <4 x i32> @vmlals16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = load <4 x i16>* %C
	%tmp4 = call <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
	ret <4 x i32> %tmp4
}

define <2 x i64> @vmlals32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = load <2 x i32>* %C
	%tmp4 = call <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
	ret <2 x i64> %tmp4
}

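; Unsigned variants: vmlal.u<size> performs the same widening
; multiply-accumulate, but zero-extends the narrow operands.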
define <8 x i16> @vmlalu8(<8 x i16>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
	%tmp4 = call <8 x i16> @llvm.arm.neon.vmlalu.v8i16(<8 x i16> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
	ret <8 x i16> %tmp4
}

define <4 x i32> @vmlalu16(<4 x i32>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = load <4 x i16>* %C
	%tmp4 = call <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
	ret <4 x i32> %tmp4
}

define <2 x i64> @vmlalu32(<2 x i64>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = load <2 x i32>* %C
	%tmp4 = call <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
	ret <2 x i64> %tmp4
}

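; Intrinsic declarations. All are marked readnone since vmlal only computes
; a value and has no side effects.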
declare <8 x i16> @llvm.arm.neon.vmlals.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmlals.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmlals.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmlalu.v8i16(<8 x i16>, <8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmlalu.v4i32(<4 x i32>, <4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmlalu.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone