; RUN: llc < %s -march=arm -mattr=+neon | FileCheck %s

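; Check that a vector multiply whose result feeds a subtract
; (sub %tmp1, (mul %tmp2, %tmp3)) is selected as a single NEON vmls
; (multiply-subtract) instruction for integer and floating-point
; element types, first on 64-bit and then on 128-bit vectors.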
define <8 x i8> @vmlsi8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
;CHECK: vmlsi8:
;CHECK: vmls.i8
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
	%tmp4 = mul <8 x i8> %tmp2, %tmp3
	%tmp5 = sub <8 x i8> %tmp1, %tmp4
	ret <8 x i8> %tmp5
}

define <4 x i16> @vmlsi16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
;CHECK: vmlsi16:
;CHECK: vmls.i16
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = load <4 x i16>* %C
	%tmp4 = mul <4 x i16> %tmp2, %tmp3
	%tmp5 = sub <4 x i16> %tmp1, %tmp4
	ret <4 x i16> %tmp5
}

define <2 x i32> @vmlsi32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
;CHECK: vmlsi32:
;CHECK: vmls.i32
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = load <2 x i32>* %C
	%tmp4 = mul <2 x i32> %tmp2, %tmp3
	%tmp5 = sub <2 x i32> %tmp1, %tmp4
	ret <2 x i32> %tmp5
}

define <2 x float> @vmlsf32(<2 x float>* %A, <2 x float>* %B, <2 x float>* %C) nounwind {
;CHECK: vmlsf32:
;CHECK: vmls.f32
	%tmp1 = load <2 x float>* %A
	%tmp2 = load <2 x float>* %B
	%tmp3 = load <2 x float>* %C
	%tmp4 = mul <2 x float> %tmp2, %tmp3
	%tmp5 = sub <2 x float> %tmp1, %tmp4
	ret <2 x float> %tmp5
}

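; The functions below repeat the same mul/sub patterns on 128-bit vector
; types, which are expected to use the Q-register form of vmls.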
define <16 x i8> @vmlsQi8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
;CHECK: vmlsQi8:
;CHECK: vmls.i8
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = load <16 x i8>* %C
	%tmp4 = mul <16 x i8> %tmp2, %tmp3
	%tmp5 = sub <16 x i8> %tmp1, %tmp4
	ret <16 x i8> %tmp5
}

define <8 x i16> @vmlsQi16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
;CHECK: vmlsQi16:
;CHECK: vmls.i16
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = load <8 x i16>* %C
	%tmp4 = mul <8 x i16> %tmp2, %tmp3
	%tmp5 = sub <8 x i16> %tmp1, %tmp4
	ret <8 x i16> %tmp5
}

define <4 x i32> @vmlsQi32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
;CHECK: vmlsQi32:
;CHECK: vmls.i32
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = load <4 x i32>* %C
	%tmp4 = mul <4 x i32> %tmp2, %tmp3
	%tmp5 = sub <4 x i32> %tmp1, %tmp4
	ret <4 x i32> %tmp5
}

define <4 x float> @vmlsQf32(<4 x float>* %A, <4 x float>* %B, <4 x float>* %C) nounwind {
;CHECK: vmlsQf32:
;CHECK: vmls.f32
	%tmp1 = load <4 x float>* %A
	%tmp2 = load <4 x float>* %B
	%tmp3 = load <4 x float>* %C
	%tmp4 = mul <4 x float> %tmp2, %tmp3
	%tmp5 = sub <4 x float> %tmp1, %tmp4
	ret <4 x float> %tmp5
}