; Test that vectors are scalarized/lowered correctly.
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vspltw | wc -l | grep 2 &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g3 | grep stfs | wc -l | grep 4 &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplti | wc -l | grep 3 &&
; RUN: llvm-as < %s | llc -march=ppc32 -mcpu=g5 | grep vsplth | wc -l | grep 1

%f4 = type <4 x float>
%i4 = type <4 x int>

implementation

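; Splat a scalar float across a <4 x float>.  Per the RUN lines above, this is
; presumably the vspltw case on the G5 and, without AltiVec on the G3, the
; scalarized path that accounts for the four stfs stores.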
void %splat(%f4* %P, %f4* %Q, float %X) {
	%tmp = insertelement %f4 undef, float %X, uint 0
	%tmp2 = insertelement %f4 %tmp, float %X, uint 1
	%tmp4 = insertelement %f4 %tmp2, float %X, uint 2
	%tmp6 = insertelement %f4 %tmp4, float %X, uint 3
	%q = load %f4* %Q
	%R = add %f4 %q, %tmp6
	store %f4 %R, %f4* %P
	ret void
}

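; The same splat-via-insertelement pattern for <4 x int>; together with %splat
; this presumably accounts for the two vspltw matches expected above.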
void %splat_i4(%i4* %P, %i4* %Q, int %X) {
	%tmp = insertelement %i4 undef, int %X, uint 0
	%tmp2 = insertelement %i4 %tmp, int %X, uint 1
	%tmp4 = insertelement %i4 %tmp2, int %X, uint 2
	%tmp6 = insertelement %i4 %tmp4, int %X, uint 3
	%q = load %i4* %Q
	%R = add %i4 %q, %tmp6
	store %i4 %R, %i4* %P
	ret void
}

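; All-ones <4 x int> constant: every element (and every byte) is -1, so it
; should be materializable with a single vsplti* immediate rather than a
; constant-pool load.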
void %splat_imm_i32(%i4* %P, %i4* %Q, int %X) {
	%q = load %i4* %Q
	%R = add %i4 %q, <int -1, int -1, int -1, int -1>
	store %i4 %R, %i4* %P
	ret void
}

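; 65537 = 0x00010001, so viewed as <8 x short> this constant is a splat of 1
; and is presumably another of the vsplti matches counted above.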
void %splat_imm_i16(%i4* %P, %i4* %Q, int %X) {
	%q = load %i4* %Q
	%R = add %i4 %q, <int 65537, int 65537, int 65537, int 65537>
	store %i4 %R, %i4* %P
	ret void
}

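; Splat a variable short across all eight halfwords; this is the single vsplth
; the RUN lines grep for.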
void %splat_h(short %tmp, <16 x ubyte>* %dst) {
	%tmp = insertelement <8 x short> undef, short %tmp, uint 0
	%tmp72 = insertelement <8 x short> %tmp, short %tmp, uint 1
	%tmp73 = insertelement <8 x short> %tmp72, short %tmp, uint 2
	%tmp74 = insertelement <8 x short> %tmp73, short %tmp, uint 3
	%tmp75 = insertelement <8 x short> %tmp74, short %tmp, uint 4
	%tmp76 = insertelement <8 x short> %tmp75, short %tmp, uint 5
	%tmp77 = insertelement <8 x short> %tmp76, short %tmp, uint 6
	%tmp78 = insertelement <8 x short> %tmp77, short %tmp, uint 7
	%tmp78 = cast <8 x short> %tmp78 to <16 x ubyte>
	store <16 x ubyte> %tmp78, <16 x ubyte>* %dst
	ret void
}

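; The all-15 <8 x short> constant operand should likewise be formed with a
; vsplti*-style immediate instead of being loaded from the constant pool.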
void %spltish(<16 x ubyte>* %A, <16 x ubyte>* %B) {
	; Gets converted to 16 x ubyte
	%tmp = load <16 x ubyte>* %B
	%tmp = cast <16 x ubyte> %tmp to <16 x sbyte>
	%tmp4 = sub <16 x sbyte> %tmp, cast (<8 x short> < short 15, short 15, short 15, short 15, short 15, short 15, short 15, short 15 > to <16 x sbyte>)
	%tmp4 = cast <16 x sbyte> %tmp4 to <16 x ubyte>
	store <16 x ubyte> %tmp4, <16 x ubyte>* %A
	ret void
}