; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vaba\\.s8} %t | count 2
; RUN: grep {vaba\\.s16} %t | count 2
; RUN: grep {vaba\\.s32} %t | count 2
; RUN: grep {vaba\\.u8} %t | count 2
; RUN: grep {vaba\\.u16} %t | count 2
; RUN: grep {vaba\\.u32} %t | count 2
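; VABA (Vector Absolute Difference and Accumulate) should be selected for the
; llvm.arm.neon.vabas/vabau intrinsics. Each pattern is expected twice because
; the 64-bit (D register) and 128-bit (Q register) variants share a mnemonic.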

define <8 x i8> @vabas8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
	%tmp4 = call <8 x i8> @llvm.arm.neon.vabas.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
	ret <8 x i8> %tmp4
}

define <4 x i16> @vabas16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = load <4 x i16>* %C
	%tmp4 = call <4 x i16> @llvm.arm.neon.vabas.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
	ret <4 x i16> %tmp4
}

define <2 x i32> @vabas32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = load <2 x i32>* %C
	%tmp4 = call <2 x i32> @llvm.arm.neon.vabas.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
	ret <2 x i32> %tmp4
}

define <8 x i8> @vabau8(<8 x i8>* %A, <8 x i8>* %B, <8 x i8>* %C) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = load <8 x i8>* %C
	%tmp4 = call <8 x i8> @llvm.arm.neon.vabau.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i8> %tmp3)
	ret <8 x i8> %tmp4
}

define <4 x i16> @vabau16(<4 x i16>* %A, <4 x i16>* %B, <4 x i16>* %C) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = load <4 x i16>* %C
	%tmp4 = call <4 x i16> @llvm.arm.neon.vabau.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i16> %tmp3)
	ret <4 x i16> %tmp4
}

define <2 x i32> @vabau32(<2 x i32>* %A, <2 x i32>* %B, <2 x i32>* %C) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = load <2 x i32>* %C
	%tmp4 = call <2 x i32> @llvm.arm.neon.vabau.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> %tmp3)
	ret <2 x i32> %tmp4
}

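; 128-bit (Q register) versions of the same operations.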
define <16 x i8> @vabaQs8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = load <16 x i8>* %C
	%tmp4 = call <16 x i8> @llvm.arm.neon.vabas.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> %tmp3)
	ret <16 x i8> %tmp4
}

define <8 x i16> @vabaQs16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = load <8 x i16>* %C
	%tmp4 = call <8 x i16> @llvm.arm.neon.vabas.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> %tmp3)
	ret <8 x i16> %tmp4
}

define <4 x i32> @vabaQs32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = load <4 x i32>* %C
	%tmp4 = call <4 x i32> @llvm.arm.neon.vabas.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> %tmp3)
	ret <4 x i32> %tmp4
}

define <16 x i8> @vabaQu8(<16 x i8>* %A, <16 x i8>* %B, <16 x i8>* %C) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = load <16 x i8>* %C
	%tmp4 = call <16 x i8> @llvm.arm.neon.vabau.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i8> %tmp3)
	ret <16 x i8> %tmp4
}

define <8 x i16> @vabaQu16(<8 x i16>* %A, <8 x i16>* %B, <8 x i16>* %C) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = load <8 x i16>* %C
	%tmp4 = call <8 x i16> @llvm.arm.neon.vabau.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i16> %tmp3)
	ret <8 x i16> %tmp4
}

define <4 x i32> @vabaQu32(<4 x i32>* %A, <4 x i32>* %B, <4 x i32>* %C) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = load <4 x i32>* %C
	%tmp4 = call <4 x i32> @llvm.arm.neon.vabau.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> %tmp3)
	ret <4 x i32> %tmp4
}

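; Declarations for the NEON vaba intrinsics used above.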
declare <8 x i8> @llvm.arm.neon.vabas.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vabas.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vabas.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone

declare <8 x i8> @llvm.arm.neon.vabau.v8i8(<8 x i8>, <8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vabau.v4i16(<4 x i16>, <4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vabau.v2i32(<2 x i32>, <2 x i32>, <2 x i32>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vabas.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vabas.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vabas.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vabau.v16i8(<16 x i8>, <16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vabau.v8i16(<8 x i16>, <8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vabau.v4i32(<4 x i32>, <4 x i32>, <4 x i32>) nounwind readnone