; RUN: llvm-as < %s | llc -march=arm -mattr=+neon > %t
; RUN: grep {vshl\\.s8} %t | count 2
; RUN: grep {vshl\\.s16} %t | count 2
; RUN: grep {vshl\\.s32} %t | count 2
; RUN: grep {vshl\\.s64} %t | count 2
; RUN: grep {vshl\\.u8} %t | count 2
; RUN: grep {vshl\\.u16} %t | count 2
; RUN: grep {vshl\\.u32} %t | count 2
; RUN: grep {vshl\\.u64} %t | count 2
; RUN: grep {vshl\\.i8} %t | count 2
; RUN: grep {vshl\\.i16} %t | count 2
; RUN: grep {vshl\\.i32} %t | count 2
; RUN: grep {vshl\\.i64} %t | count 2
; RUN: grep {vshr\\.s8} %t | count 2
; RUN: grep {vshr\\.s16} %t | count 2
; RUN: grep {vshr\\.s32} %t | count 2
; RUN: grep {vshr\\.s64} %t | count 2
; RUN: grep {vshr\\.u8} %t | count 2
; RUN: grep {vshr\\.u16} %t | count 2
; RUN: grep {vshr\\.u32} %t | count 2
; RUN: grep {vshr\\.u64} %t | count 2
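; Each instruction form should appear exactly twice: once from the 64-bit
; (D register) vector version of a test and once from the corresponding
; 128-bit (Q register) version.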

define <8 x i8> @vshls8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}

define <4 x i16> @vshls16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}

define <2 x i32> @vshls32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}

define <1 x i64> @vshls64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
	ret <1 x i64> %tmp3
}

define <8 x i8> @vshlu8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = load <8 x i8>* %B
	%tmp3 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> %tmp2)
	ret <8 x i8> %tmp3
}

define <4 x i16> @vshlu16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = load <4 x i16>* %B
	%tmp3 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> %tmp2)
	ret <4 x i16> %tmp3
}

define <2 x i32> @vshlu32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = load <2 x i32>* %B
	%tmp3 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> %tmp2)
	ret <2 x i32> %tmp3
}

define <1 x i64> @vshlu64(<1 x i64>* %A, <1 x i64>* %B) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = load <1 x i64>* %B
	%tmp3 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> %tmp2)
	ret <1 x i64> %tmp3
}

define <16 x i8> @vshlQs8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
	ret <16 x i8> %tmp3
}

define <8 x i16> @vshlQs16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i16> %tmp3
}

define <4 x i32> @vshlQs32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i32> %tmp3
}

define <2 x i64> @vshlQs64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
	ret <2 x i64> %tmp3
}

define <16 x i8> @vshlQu8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = load <16 x i8>* %B
	%tmp3 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> %tmp2)
	ret <16 x i8> %tmp3
}

define <8 x i16> @vshlQu16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = load <8 x i16>* %B
	%tmp3 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> %tmp2)
	ret <8 x i16> %tmp3
}

define <4 x i32> @vshlQu32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = load <4 x i32>* %B
	%tmp3 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> %tmp2)
	ret <4 x i32> %tmp3
}

define <2 x i64> @vshlQu64(<2 x i64>* %A, <2 x i64>* %B) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = load <2 x i64>* %B
	%tmp3 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
	ret <2 x i64> %tmp3
}

; For left shifts by immediates, the signedness is irrelevant.
; Test a mix of both signed and unsigned intrinsics.
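; Either intrinsic should lower to the type-only vshl.iN form checked by
; the RUN lines above.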

define <8 x i8> @vshli8(<8 x i8>* %A) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
	ret <8 x i8> %tmp2
}

define <4 x i16> @vshli16(<4 x i16>* %A) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 15, i16 15, i16 15, i16 15 >)
	ret <4 x i16> %tmp2
}

define <2 x i32> @vshli32(<2 x i32>* %A) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
	ret <2 x i32> %tmp2
}

define <1 x i64> @vshli64(<1 x i64>* %A) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 63 >)
	ret <1 x i64> %tmp2
}

define <16 x i8> @vshlQi8(<16 x i8>* %A) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7, i8 7 >)
	ret <16 x i8> %tmp2
}

define <8 x i16> @vshlQi16(<8 x i16>* %A) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15, i16 15 >)
	ret <8 x i16> %tmp2
}

define <4 x i32> @vshlQi32(<4 x i32>* %A) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 31, i32 31, i32 31, i32 31 >)
	ret <4 x i32> %tmp2
}

define <2 x i64> @vshlQi64(<2 x i64>* %A) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
	ret <2 x i64> %tmp2
}

; Right shift by immediate:
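; The shift intrinsics represent a right shift as a shift by a negative
; amount, so the all-negative constant vectors here should select the
; vshr.sN and vshr.uN instructions.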

define <8 x i8> @vshrs8(<8 x i8>* %A) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = call <8 x i8> @llvm.arm.neon.vshifts.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	ret <8 x i8> %tmp2
}

define <4 x i16> @vshrs16(<4 x i16>* %A) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = call <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
	ret <4 x i16> %tmp2
}

define <2 x i32> @vshrs32(<2 x i32>* %A) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = call <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
	ret <2 x i32> %tmp2
}

define <1 x i64> @vshrs64(<1 x i64>* %A) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = call <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
	ret <1 x i64> %tmp2
}

define <8 x i8> @vshru8(<8 x i8>* %A) nounwind {
	%tmp1 = load <8 x i8>* %A
	%tmp2 = call <8 x i8> @llvm.arm.neon.vshiftu.v8i8(<8 x i8> %tmp1, <8 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	ret <8 x i8> %tmp2
}

define <4 x i16> @vshru16(<4 x i16>* %A) nounwind {
	%tmp1 = load <4 x i16>* %A
	%tmp2 = call <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16> %tmp1, <4 x i16> < i16 -16, i16 -16, i16 -16, i16 -16 >)
	ret <4 x i16> %tmp2
}

define <2 x i32> @vshru32(<2 x i32>* %A) nounwind {
	%tmp1 = load <2 x i32>* %A
	%tmp2 = call <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32> %tmp1, <2 x i32> < i32 -32, i32 -32 >)
	ret <2 x i32> %tmp2
}

define <1 x i64> @vshru64(<1 x i64>* %A) nounwind {
	%tmp1 = load <1 x i64>* %A
	%tmp2 = call <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64> %tmp1, <1 x i64> < i64 -64 >)
	ret <1 x i64> %tmp2
}

define <16 x i8> @vshrQs8(<16 x i8>* %A) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = call <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	ret <16 x i8> %tmp2
}

define <8 x i16> @vshrQs16(<8 x i16>* %A) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = call <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
	ret <8 x i16> %tmp2
}

define <4 x i32> @vshrQs32(<4 x i32>* %A) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = call <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
	ret <4 x i32> %tmp2
}

define <2 x i64> @vshrQs64(<2 x i64>* %A) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
	ret <2 x i64> %tmp2
}

define <16 x i8> @vshrQu8(<16 x i8>* %A) nounwind {
	%tmp1 = load <16 x i8>* %A
	%tmp2 = call <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8> %tmp1, <16 x i8> < i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8, i8 -8 >)
	ret <16 x i8> %tmp2
}

define <8 x i16> @vshrQu16(<8 x i16>* %A) nounwind {
	%tmp1 = load <8 x i16>* %A
	%tmp2 = call <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16> %tmp1, <8 x i16> < i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16, i16 -16 >)
	ret <8 x i16> %tmp2
}

define <4 x i32> @vshrQu32(<4 x i32>* %A) nounwind {
	%tmp1 = load <4 x i32>* %A
	%tmp2 = call <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32> %tmp1, <4 x i32> < i32 -32, i32 -32, i32 -32, i32 -32 >)
	ret <4 x i32> %tmp2
}

define <2 x i64> @vshrQu64(<2 x i64>* %A) nounwind {
	%tmp1 = load <2 x i64>* %A
	%tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 >)
	ret <2 x i64> %tmp2
}

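; Declarations of the NEON vector shift intrinsics used above, one per
; element type for both the 64-bit and 128-bit vector widths.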
declare <8 x i8>  @llvm.arm.neon.vshifts.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vshifts.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vshifts.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vshifts.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <8 x i8>  @llvm.arm.neon.vshiftu.v8i8(<8 x i8>, <8 x i8>) nounwind readnone
declare <4 x i16> @llvm.arm.neon.vshiftu.v4i16(<4 x i16>, <4 x i16>) nounwind readnone
declare <2 x i32> @llvm.arm.neon.vshiftu.v2i32(<2 x i32>, <2 x i32>) nounwind readnone
declare <1 x i64> @llvm.arm.neon.vshiftu.v1i64(<1 x i64>, <1 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vshifts.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vshifts.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vshifts.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone

declare <16 x i8> @llvm.arm.neon.vshiftu.v16i8(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.arm.neon.vshiftu.v8i16(<8 x i16>, <8 x i16>) nounwind readnone
declare <4 x i32> @llvm.arm.neon.vshiftu.v4i32(<4 x i32>, <4 x i32>) nounwind readnone
declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone