; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vceq\\.i8} %t | count 2
; RUN: grep {vceq\\.i16} %t | count 2
; RUN: grep {vceq\\.i32} %t | count 2
; RUN: grep vmvn %t | count 6
; RUN: grep {vcgt\\.s8} %t | count 1
; RUN: grep {vcge\\.s16} %t | count 1
; RUN: grep {vcgt\\.u16} %t | count 1
; RUN: grep {vcge\\.u32} %t | count 1
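; Each VCEQ count is 2 because both the 64-bit and 128-bit vectors of that
; element size are tested below; the VMVN count is 6, one per not-equal test.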

; This tests vicmp operations that do not map directly to NEON instructions.
; Not-equal (ne) operations are implemented by VCEQ/VMVN. Less-than (lt/ult)
; and less-than-or-equal (le/ule) are implemented by swapping the arguments
; to VCGT and VCGE. Test all the operand types for not-equal but only sample
; the other operations.
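;
; As a rough sketch (register assignments are illustrative only; the greps
; above check opcodes, not registers), the i8 not-equal case is expected to
; lower to a compare-equal followed by a bitwise NOT:
;   vceq.i8 d16, d16, d17
;   vmvn d16, d16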

define <8 x i8> @vcnei8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
  %tmp1 = load <8 x i8>* %A
  %tmp2 = load <8 x i8>* %B
  %tmp3 = vicmp ne <8 x i8> %tmp1, %tmp2
  ret <8 x i8> %tmp3
}

define <4 x i16> @vcnei16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = vicmp ne <4 x i16> %tmp1, %tmp2
  ret <4 x i16> %tmp3
}

define <2 x i32> @vcnei32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
  %tmp1 = load <2 x i32>* %A
  %tmp2 = load <2 x i32>* %B
  %tmp3 = vicmp ne <2 x i32> %tmp1, %tmp2
  ret <2 x i32> %tmp3
}

define <16 x i8> @vcneQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = vicmp ne <16 x i8> %tmp1, %tmp2
  ret <16 x i8> %tmp3
}

define <8 x i16> @vcneQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
  %tmp1 = load <8 x i16>* %A
  %tmp2 = load <8 x i16>* %B
  %tmp3 = vicmp ne <8 x i16> %tmp1, %tmp2
  ret <8 x i16> %tmp3
}

define <4 x i32> @vcneQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
  %tmp3 = vicmp ne <4 x i32> %tmp1, %tmp2
  ret <4 x i32> %tmp3
}

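; The remaining tests sample the swapped-operand lowerings; for example,
; "vicmp slt %tmp1, %tmp2" below is expected to become a VCGT.S8 with the
; operands reversed (a sketch; the greps above only check the opcode).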
define <16 x i8> @vcltQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
  %tmp1 = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = vicmp slt <16 x i8> %tmp1, %tmp2
  ret <16 x i8> %tmp3
}

define <4 x i16> @vcles16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = vicmp sle <4 x i16> %tmp1, %tmp2
  ret <4 x i16> %tmp3
}

define <4 x i16> @vcltu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
  %tmp1 = load <4 x i16>* %A
  %tmp2 = load <4 x i16>* %B
  %tmp3 = vicmp ult <4 x i16> %tmp1, %tmp2
  ret <4 x i16> %tmp3
}

define <4 x i32> @vcleQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
  %tmp1 = load <4 x i32>* %A
  %tmp2 = load <4 x i32>* %B
  %tmp3 = vicmp ule <4 x i32> %tmp1, %tmp2
  ret <4 x i32> %tmp3
}