; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vceq\\.i8} %t | count 2
; RUN: grep {vceq\\.i16} %t | count 2
; RUN: grep {vceq\\.i32} %t | count 2
; RUN: grep vmvn %t | count 6
; RUN: grep {vcgt\\.s8} %t | count 1
; RUN: grep {vcge\\.s16} %t | count 1
; RUN: grep {vcgt\\.u16} %t | count 1
; RUN: grep {vcge\\.u32} %t | count 1

; This tests icmp operations that do not map directly to NEON instructions.
; Not-equal (ne) operations are implemented by VCEQ/VMVN. Less-than (lt/ult)
; and less-than-or-equal (le/ule) are implemented by swapping the arguments
; to VCGT and VCGE. Test all the operand types for not-equal but only sample
; the other operations.
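; As an illustrative sketch only (the register assignments here are a guess;
; the greps above check just the opcodes and their counts), the "ne" case in
; vcnei8 below is expected to lower to roughly:
;   vceq.i8 d16, d16, d17
;   vmvn d16, d16
; Each of the six "ne" functions should emit one VMVN, which is why the
; vmvn count above is 6.
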
define <8 x i8> @vcnei8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = icmp ne <8 x i8> %tmp1, %tmp2
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i8>
  ret <8 x i8> %tmp4
}

define <4 x i16> @vcnei16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = icmp ne <4 x i16> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

define <2 x i32> @vcnei32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = icmp ne <2 x i32> %tmp1, %tmp2
  %tmp4 = sext <2 x i1> %tmp3 to <2 x i32>
  ret <2 x i32> %tmp4
}

define <16 x i8> @vcneQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = icmp ne <16 x i8> %tmp1, %tmp2
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

define <8 x i16> @vcneQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
  %tmp3 = icmp ne <8 x i16> %tmp1, %tmp2
  %tmp4 = sext <8 x i1> %tmp3 to <8 x i16>
  ret <8 x i16> %tmp4
}

define <4 x i32> @vcneQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
  %tmp3 = icmp ne <4 x i32> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}

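; slt: expected to be selected as vcgt.s8 with the operands swapped.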
define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = icmp slt <16 x i8> %tmp1, %tmp2
  %tmp4 = sext <16 x i1> %tmp3 to <16 x i8>
  ret <16 x i8> %tmp4
}

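; sle: expected to be selected as vcge.s16 with the operands swapped.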
define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = icmp sle <4 x i16> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

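; ult: expected to be selected as vcgt.u16 with the operands swapped.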
define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = icmp ult <4 x i16> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i16>
  ret <4 x i16> %tmp4
}

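; ule: expected to be selected as vcge.u32 with the operands swapped.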
define <4 x i32> @vcleQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
  %tmp3 = icmp ule <4 x i32> %tmp1, %tmp2
  %tmp4 = sext <4 x i1> %tmp3 to <4 x i32>
  ret <4 x i32> %tmp4
}