; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 | not grep mullw
; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 | grep vmsumuhm
Chris Lattner | 843ecd6 | 2006-04-18 03:22:16 +0000 | [diff] [blame] | 3 | |
; v4i32 element-wise multiply: per the RUN lines above, codegen must use the
; AltiVec vmsumuhm instruction and must not fall back to scalar mullw.
<4 x int> %test_v4i32(<4 x int>* %X, <4 x int>* %Y) {
	%lhs = load <4 x int>* %X
	%rhs = load <4 x int>* %Y
	%prod = mul <4 x int> %lhs, %rhs
	ret <4 x int> %prod
}
| 10 | |
; v8i16 element-wise multiply; the file-level RUN check forbids scalar mullw
; in the generated ppc32/g5 assembly, so this must select vector code.
<8 x short> %test_v8i16(<8 x short>* %X, <8 x short>* %Y) {
	%lhs = load <8 x short>* %X
	%rhs = load <8 x short>* %Y
	%prod = mul <8 x short> %lhs, %rhs
	ret <8 x short> %prod
}
| 17 | |
; v16i8 element-wise multiply; like the other cases in this file, the RUN
; lines require vectorized codegen (no mullw) on ppc32/g5.
<16 x sbyte> %test_v16i8(<16 x sbyte>* %X, <16 x sbyte>* %Y) {
	%lhs = load <16 x sbyte>* %X
	%rhs = load <16 x sbyte>* %Y
	%prod = mul <16 x sbyte> %lhs, %rhs
	ret <16 x sbyte> %prod
}
| 24 | |