; RUN: llc < %s -march=arm -mattr=+neon > %t
; RUN: grep {vmull\\.s8} %t | count 1
; RUN: grep {vmull\\.s16} %t | count 1
; RUN: grep {vmull\\.s32} %t | count 1
; RUN: grep {vmull\\.u8} %t | count 1
; RUN: grep {vmull\\.u16} %t | count 1
; RUN: grep {vmull\\.u32} %t | count 1
; RUN: grep {vmull\\.p8} %t | count 1

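; Signed widening multiply: <8 x i8> x <8 x i8> -> <8 x i16>, should select vmull.s8.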
define <8 x i16> @vmulls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}

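; Signed widening multiply: <4 x i16> x <4 x i16> -> <4 x i32>, should select vmull.s16.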
define <4 x i32> @vmulls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i32> %tmp3
}

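; Signed widening multiply: <2 x i32> x <2 x i32> -> <2 x i64>, should select vmull.s32.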
define <2 x i64> @vmulls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i64> %tmp3
}

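; Unsigned widening multiply: <8 x i8> x <8 x i8> -> <8 x i16>, should select vmull.u8.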
define <8 x i16> @vmullu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}

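; Unsigned widening multiply: <4 x i16> x <4 x i16> -> <4 x i32>, should select vmull.u16.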
define <4 x i32> @vmullu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i32> %tmp3
}

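; Unsigned widening multiply: <2 x i32> x <2 x i32> -> <2 x i64>, should select vmull.u32.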
define <2 x i64> @vmullu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i64> %tmp3
}

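; Polynomial (carry-less) widening multiply: <8 x i8> x <8 x i8> -> <8 x i16>, should select vmull.p8.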
define <8 x i16> @vmullp8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i16> %tmp3
}

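; The intrinsics are declared readnone: they have no side effects, so the
; calls above may be freely reordered or eliminated by the optimizer.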
declare <8 x i16> @llvm.arm.neon.vmulls.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmulls.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmulls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmullu.v8i16(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vmullu.v4i32(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vmullu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i16> @llvm.arm.neon.vmullp.v8i16(<8 x i8>, <8 x i8>) nounwind readnone