; Test that vectors are scalarized/lowered correctly.
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vspltw | wc -l | grep 2 &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g3 | grep stfs | wc -l | grep 4 &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 3 &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplth | wc -l | grep 1

; Old-style (pre-2.0 LLVM) type aliases: a 4 x float and a 4 x int vector.
; 'implementation' is the archaic marker separating declarations from bodies.
%f4 = type <4 x float>
%i4 = type <4 x int>

implementation

; Splat a variable float %X into all 4 lanes, then add to *%Q and store to
; *%P.  On g5 this should codegen to a vspltw (counted by the RUN line).
void %splat(%f4* %P, %f4* %Q, float %X) {
	%tmp = insertelement %f4 undef, float %X, uint 0
	%tmp2 = insertelement %f4 %tmp, float %X, uint 1
	%tmp4 = insertelement %f4 %tmp2, float %X, uint 2
	%tmp6 = insertelement %f4 %tmp4, float %X, uint 3
	%q = load %f4* %Q
	%R = add %f4 %q, %tmp6
	store %f4 %R, %f4* %P
	ret void
}

; Same splat pattern with a variable int: builds a 4 x int splat of %X,
; adds it to the vector loaded from %Q, and stores the result to %P.
void %splat_i4(%i4* %P, %i4* %Q, int %X) {
	%tmp = insertelement %i4 undef, int %X, uint 0
	%tmp2 = insertelement %i4 %tmp, int %X, uint 1
	%tmp4 = insertelement %i4 %tmp2, int %X, uint 2
	%tmp6 = insertelement %i4 %tmp4, int %X, uint 3
	%q = load %i4* %Q
	%R = add %i4 %q, %tmp6
	store %i4 %R, %i4* %P
	ret void
}

; Constant splat of -1: small immediates fit vspltisw (counted by the
; 'vsplti' RUN line together with the other immediate-splat tests below).
void %splat_imm_i32(%i4* %P, %i4* %Q, int %X) {
	%q = load %i4* %Q
	%R = add %i4 %q, <int -1, int -1, int -1, int -1>
	store %i4 %R, %i4* %P
	ret void
}

; 65537 = 0x00010001, i.e. the 32-bit constant is itself a splat of the
; 16-bit value 1 — should be materializable with a halfword immediate splat.
void %splat_imm_i16(%i4* %P, %i4* %Q, int %X) {
	%q = load %i4* %Q
	%R = add %i4 %q, <int 65537, int 65537, int 65537, int 65537>
	store %i4 %R, %i4* %P
	ret void
}

; Splat a variable short into 8 lanes and store as 16 x ubyte — should
; select vsplth on g5 (counted by the 'vsplth' RUN line).
; NOTE(review): %tmp and %tmp78 are each defined twice at different types;
; the era's typed symbol tables permitted this — preserved verbatim.
void %splat_h(short %tmp, <16 x ubyte>* %dst) {
	%tmp = insertelement <8 x short> undef, short %tmp, uint 0
	%tmp72 = insertelement <8 x short> %tmp, short %tmp, uint 1
	%tmp73 = insertelement <8 x short> %tmp72, short %tmp, uint 2
	%tmp74 = insertelement <8 x short> %tmp73, short %tmp, uint 3
	%tmp75 = insertelement <8 x short> %tmp74, short %tmp, uint 4
	%tmp76 = insertelement <8 x short> %tmp75, short %tmp, uint 5
	%tmp77 = insertelement <8 x short> %tmp76, short %tmp, uint 6
	%tmp78 = insertelement <8 x short> %tmp77, short %tmp, uint 7
	%tmp78 = cast <8 x short> %tmp78 to <16 x ubyte>
	store <16 x ubyte> %tmp78, <16 x ubyte>* %dst
	ret void
}

; Subtract a constant <8 x short> splat of 15 (viewed as 16 x sbyte) from a
; byte vector; the splat-of-15 should be materialized with a vspltish-style
; immediate (contributes to the 'vsplti' RUN-line count).
void %spltish(<16 x ubyte>* %A, <16 x ubyte>* %B) {
	; Gets converted to 16 x ubyte
	%tmp = load <16 x ubyte>* %B
	%tmp = cast <16 x ubyte> %tmp to <16 x sbyte>
	%tmp4 = sub <16 x sbyte> %tmp, cast (<8 x short> < short 15, short 15, short 15, short 15, short 15, short 15, short 15, short 15 > to <16 x sbyte>)
	%tmp4 = cast <16 x sbyte> %tmp4 to <16 x ubyte>
	store <16 x ubyte> %tmp4, <16 x ubyte>* %A
	ret void
}
