; RUN: llc < %s -march=arm -mattr=+neon > %t
; RUN: grep {vsra\\.s8} %t | count 2
; RUN: grep {vsra\\.s16} %t | count 2
; RUN: grep {vsra\\.s32} %t | count 2
; RUN: grep {vsra\\.s64} %t | count 2
; RUN: grep {vsra\\.u8} %t | count 2
; RUN: grep {vsra\\.u16} %t | count 2
; RUN: grep {vsra\\.u32} %t | count 2
; RUN: grep {vsra\\.u64} %t | count 2
; RUN: grep {vrsra\\.s8} %t | count 2
; RUN: grep {vrsra\\.s16} %t | count 2
; RUN: grep {vrsra\\.s32} %t | count 2
; RUN: grep {vrsra\\.s64} %t | count 2
; RUN: grep {vrsra\\.u8} %t | count 2
; RUN: grep {vrsra\\.u16} %t | count 2
; RUN: grep {vrsra\\.u32} %t | count 2
; RUN: grep {vrsra\\.u64} %t | count 2
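; Check that a vector shift-right (ashr/lshr, or the vrshifts/vrshiftu rounding
; shift intrinsics) followed by an add is selected as a single NEON vsra/vrsra
; instruction, for each element type in both 64-bit and 128-bit registers.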

define <8 x i8> @vsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
%tmp3 = ashr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
%tmp4 = add <8 x i8> %tmp1, %tmp3
ret <8 x i8> %tmp4
}

define <4 x i16> @vsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
%tmp3 = ashr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
%tmp4 = add <4 x i16> %tmp1, %tmp3
ret <4 x i16> %tmp4
}

define <2 x i32> @vsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
%tmp3 = ashr <2 x i32> %tmp2, < i32 32, i32 32 >
%tmp4 = add <2 x i32> %tmp1, %tmp3
ret <2 x i32> %tmp4
}

define <1 x i64> @vsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
%tmp3 = ashr <1 x i64> %tmp2, < i64 64 >
%tmp4 = add <1 x i64> %tmp1, %tmp3
ret <1 x i64> %tmp4
}

define <16 x i8> @vsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = ashr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
%tmp4 = add <16 x i8> %tmp1, %tmp3
ret <16 x i8> %tmp4
}

define <8 x i16> @vsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
%tmp3 = ashr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
%tmp4 = add <8 x i16> %tmp1, %tmp3
ret <8 x i16> %tmp4
}

define <4 x i32> @vsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
%tmp3 = ashr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
%tmp4 = add <4 x i32> %tmp1, %tmp3
ret <4 x i32> %tmp4
}

define <2 x i64> @vsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
%tmp3 = ashr <2 x i64> %tmp2, < i64 64, i64 64 >
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
}

define <8 x i8> @vsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
%tmp3 = lshr <8 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
%tmp4 = add <8 x i8> %tmp1, %tmp3
ret <8 x i8> %tmp4
}

define <4 x i16> @vsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
%tmp3 = lshr <4 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16 >
%tmp4 = add <4 x i16> %tmp1, %tmp3
ret <4 x i16> %tmp4
}

define <2 x i32> @vsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
%tmp3 = lshr <2 x i32> %tmp2, < i32 32, i32 32 >
%tmp4 = add <2 x i32> %tmp1, %tmp3
ret <2 x i32> %tmp4
}

define <1 x i64> @vsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
%tmp3 = lshr <1 x i64> %tmp2, < i64 64 >
%tmp4 = add <1 x i64> %tmp1, %tmp3
ret <1 x i64> %tmp4
}

define <16 x i8> @vsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = lshr <16 x i8> %tmp2, < i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8, i8 8 >
%tmp4 = add <16 x i8> %tmp1, %tmp3
ret <16 x i8> %tmp4
}

define <8 x i16> @vsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
%tmp3 = lshr <8 x i16> %tmp2, < i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16, i16 16 >
%tmp4 = add <8 x i16> %tmp1, %tmp3
ret <8 x i16> %tmp4
}

define <4 x i32> @vsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
%tmp3 = lshr <4 x i32> %tmp2, < i32 32, i32 32, i32 32, i32 32 >
%tmp4 = add <4 x i32> %tmp1, %tmp3
ret <4 x i32> %tmp4
}

define <2 x i64> @vsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
%tmp3 = lshr <2 x i64> %tmp2, < i64 64, i64 64 >
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
}

define <8 x i8> @vrsras8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.arm.neon.vrshifts.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
%tmp4 = add <8 x i8> %tmp1, %tmp3
ret <8 x i8> %tmp4
}

define <4 x i16> @vrsras16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
%tmp4 = add <4 x i16> %tmp1, %tmp3
ret <4 x i16> %tmp4
}

define <2 x i32> @vrsras32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
%tmp4 = add <2 x i32> %tmp1, %tmp3
ret <2 x i32> %tmp4
}

define <1 x i64> @vrsras64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
%tmp3 = call <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
%tmp4 = add <1 x i64> %tmp1, %tmp3
ret <1 x i64> %tmp4
}

define <8 x i8> @vrsrau8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
%tmp1 = load <8 x i8>* %A
%tmp2 = load <8 x i8>* %B
%tmp3 = call <8 x i8> @llvm.arm.neon.vrshiftu.v8i8(<8 x i8> %tmp2, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
%tmp4 = add <8 x i8> %tmp1, %tmp3
ret <8 x i8> %tmp4
}

define <4 x i16> @vrsrau16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
%tmp1 = load <4 x i16>* %A
%tmp2 = load <4 x i16>* %B
%tmp3 = call <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16> %tmp2, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
%tmp4 = add <4 x i16> %tmp1, %tmp3
ret <4 x i16> %tmp4
}

define <2 x i32> @vrsrau32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
%tmp1 = load <2 x i32>* %A
%tmp2 = load <2 x i32>* %B
%tmp3 = call <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32> %tmp2, <2 x i32> < i32 -32, i32 -32 >)
%tmp4 = add <2 x i32> %tmp1, %tmp3
ret <2 x i32> %tmp4
}

define <1 x i64> @vrsrau64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
%tmp1 = load <1 x i64>* %A
%tmp2 = load <1 x i64>* %B
%tmp3 = call <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64> %tmp2, <1 x i64> < i64 -64 >)
%tmp4 = add <1 x i64> %tmp1, %tmp3
ret <1 x i64> %tmp4
}

define <16 x i8> @vrsraQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
%tmp4 = add <16 x i8> %tmp1, %tmp3
ret <16 x i8> %tmp4
}

define <8 x i16> @vrsraQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
%tmp4 = add <8 x i16> %tmp1, %tmp3
ret <8 x i16> %tmp4
}

define <4 x i32> @vrsraQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
%tmp4 = add <4 x i32> %tmp1, %tmp3
ret <4 x i32> %tmp4
}

define <2 x i64> @vrsraQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
%tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
}

define <16 x i8> @vrsraQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
%tmp1 = load <16 x i8>* %A
%tmp2 = load <16 x i8>* %B
%tmp3 = call <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8> %tmp2, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
%tmp4 = add <16 x i8> %tmp1, %tmp3
ret <16 x i8> %tmp4
}

define <8 x i16> @vrsraQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
%tmp1 = load <8 x i16>* %A
%tmp2 = load <8 x i16>* %B
%tmp3 = call <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16> %tmp2, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
%tmp4 = add <8 x i16> %tmp1, %tmp3
ret <8 x i16> %tmp4
}

define <4 x i32> @vrsraQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
%tmp1 = load <4 x i32>* %A
%tmp2 = load <4 x i32>* %B
%tmp3 = call <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32> %tmp2, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
%tmp4 = add <4 x i32> %tmp1, %tmp3
ret <4 x i32> %tmp4
}

define <2 x i64> @vrsraQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
%tmp1 = load <2 x i64>* %A
%tmp2 = load <2 x i64>* %B
%tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp2, <2 x i64> < i64 -64, i64 -64 >)
%tmp4 = add <2 x i64> %tmp1, %tmp3
ret <2 x i64> %tmp4
}

declare <8 x i8>  @llvm.arm.neon.vrshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vrshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <8 x i8>  @llvm.arm.neon.vrshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vrshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vrshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vrshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vrshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vrshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vrshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vrshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vrshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vrshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone