; RUN: llc < %s -march=x86-64
; PR 9267
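; There are no CHECK lines; the test passes as long as llc compiles the vector
; truncates below without crashing.  Each @func_* truncates two loaded vectors
; to a narrower element type and combines them with a binary operation.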
define <4 x i16> @func_16_32() {
  %F = load <4 x i32>* undef
  %G = trunc <4 x i32> %F to <4 x i16>
  %H = load <4 x i32>* undef
  %Y = trunc <4 x i32> %H to <4 x i16>
  %T = add <4 x i16> %Y, %G
  store <4 x i16> %T, <4 x i16>* undef
  ret <4 x i16> %T
}

define <4 x i16> @func_16_64() {
  %F = load <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i16>
  %H = load <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i16>
  %T = xor <4 x i16> %Y, %G
  store <4 x i16> %T, <4 x i16>* undef
  ret <4 x i16> %T
}

define <4 x i32> @func_32_64() {
  %F = load <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i32>
  %H = load <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i32>
  %T = or <4 x i32> %Y, %G
  ret <4 x i32> %T
}

define <4 x i8> @func_8_16() {
  %F = load <4 x i16>* undef
  %G = trunc <4 x i16> %F to <4 x i8>
  %H = load <4 x i16>* undef
  %Y = trunc <4 x i16> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  ret <4 x i8> %T
}

define <4 x i8> @func_8_32() {
  %F = load <4 x i32>* undef
  %G = trunc <4 x i32> %F to <4 x i8>
  %H = load <4 x i32>* undef
  %Y = trunc <4 x i32> %H to <4 x i8>
  %T = sub <4 x i8> %Y, %G
  ret <4 x i8> %T
}

define <4 x i8> @func_8_64() {
  %F = load <4 x i64>* undef
  %G = trunc <4 x i64> %F to <4 x i8>
  %H = load <4 x i64>* undef
  %Y = trunc <4 x i64> %H to <4 x i8>
  %T = add <4 x i8> %Y, %G
  ret <4 x i8> %T
}
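; Truncation of constant vector operands.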
define <4 x i16> @const_16_32() {
  %G = trunc <4 x i32> <i32 0, i32 3, i32 8, i32 7> to <4 x i16>
  ret <4 x i16> %G
}

define <4 x i16> @const_16_64() {
  %G = trunc <4 x i64> <i64 0, i64 3, i64 8, i64 7> to <4 x i16>
  ret <4 x i16> %G
}
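; The lshr and xor below operate on the truncated <4 x i32> value; the function
; name suggests this guards against a bad bit-width reduction of the trunc.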
define void @bugOnTruncBitwidthReduce() nounwind {
meh:
  %0 = xor <4 x i64> zeroinitializer, zeroinitializer
  %1 = trunc <4 x i64> %0 to <4 x i32>
  %2 = lshr <4 x i32> %1, <i32 18, i32 18, i32 18, i32 18>
  %3 = xor <4 x i32> %2, %1
  ret void
}