; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s
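
; Verify that a vector multiply followed by a vector add is combined into a
; NEON vmla (multiply-accumulate) instruction of the matching element type,
; for both 64-bit (D register) and 128-bit (Q register) vectors.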

define <8 x i8> @vmlai8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
;CHECK: vmlai8:
;CHECK: vmla.i8
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = load <8 x i8>* %C
  %tmp4 = mul <8 x i8> %tmp2, %tmp3
  %tmp5 = add <8 x i8> %tmp1, %tmp4
  ret <8 x i8> %tmp5
}

define <4 x i16> @vmlai16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
;CHECK: vmlai16:
;CHECK: vmla.i16
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = load <4 x i16>* %C
  %tmp4 = mul <4 x i16> %tmp2, %tmp3
  %tmp5 = add <4 x i16> %tmp1, %tmp4
  ret <4 x i16> %tmp5
}

define <2 x i32> @vmlai32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
;CHECK: vmlai32:
;CHECK: vmla.i32
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = load <2 x i32>* %C
  %tmp4 = mul <2 x i32> %tmp2, %tmp3
  %tmp5 = add <2 x i32> %tmp1, %tmp4
  ret <2 x i32> %tmp5
}

define <2 x float> @vmlaf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
;CHECK: vmlaf32:
;CHECK: vmla.f32
  %tmp1 = load <2 x float>* %A
  %tmp2 = load <2 x float>* %B
  %tmp3 = load <2 x float>* %C
  %tmp4 = mul <2 x float> %tmp2, %tmp3
  %tmp5 = add <2 x float> %tmp1, %tmp4
  ret <2 x float> %tmp5
}

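; 128-bit (Q register) versions of the same mul+add patterns.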
define <16 x i8> @vmlaQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
;CHECK: vmlaQi8:
;CHECK: vmla.i8
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = load <16 x i8>* %C
  %tmp4 = mul <16 x i8> %tmp2, %tmp3
  %tmp5 = add <16 x i8> %tmp1, %tmp4
  ret <16 x i8> %tmp5
}

define <8 x i16> @vmlaQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
;CHECK: vmlaQi16:
;CHECK: vmla.i16
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
  %tmp3 = load <8 x i16>* %C
  %tmp4 = mul <8 x i16> %tmp2, %tmp3
  %tmp5 = add <8 x i16> %tmp1, %tmp4
  ret <8 x i16> %tmp5
}

define <4 x i32> @vmlaQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
;CHECK: vmlaQi32:
;CHECK: vmla.i32
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
  %tmp3 = load <4 x i32>* %C
  %tmp4 = mul <4 x i32> %tmp2, %tmp3
  %tmp5 = add <4 x i32> %tmp1, %tmp4
  ret <4 x i32> %tmp5
}

define <4 x float> @vmlaQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
;CHECK: vmlaQf32:
;CHECK: vmla.f32
  %tmp1 = load <4 x float>* %A
  %tmp2 = load <4 x float>* %B
  %tmp3 = load <4 x float>* %C
  %tmp4 = mul <4 x float> %tmp2, %tmp3
  %tmp5 = add <4 x float> %tmp1, %tmp4
  ret <4 x float> %tmp5
}