; Test that vectors are scalarized/lowered correctly.
; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g3 | \
; RUN:   grep stfs | count 4
; RUN: llvm-upgrade < %s | llvm-as | llc -march=ppc32 -mcpu=g5 -o %t -f
; RUN: grep vspltw %t | count 2
; RUN: grep vsplti %t | count 3
; RUN: grep vsplth %t | count 1
; Vector type aliases used by every function in this test.
%f4 = type <4 x float>   ; four single-precision floats (one AltiVec register)
%i4 = type <4 x int>     ; four ints (pre-2.0 syntax: 'int' is the 32-bit type)

; Pre-2.0 IR separates declarations from definitions with 'implementation'.
implementation
; Splat the scalar float %X into all four lanes, add it to the vector
; loaded from %Q, and store the result to %P.  On g3 this must be
; scalarized (4 stfs); on g5 it should use a single vspltw.
void %splat(%f4* %P, %f4* %Q, float %X) {
        %lane0 = insertelement %f4 undef, float %X, uint 0
        %lane01 = insertelement %f4 %lane0, float %X, uint 1
        %lane012 = insertelement %f4 %lane01, float %X, uint 2
        %splatted = insertelement %f4 %lane012, float %X, uint 3
        %src = load %f4* %Q
        %sum = add %f4 %src, %splatted
        store %f4 %sum, %f4* %P
        ret void
}
24
; Integer twin of %splat: broadcast the scalar int %X into all four
; lanes, add the splat to the vector loaded from %Q, store to %P.
; Should also select vspltw on g5 (second vspltw counted by the RUN line).
void %splat_i4(%i4* %P, %i4* %Q, int %X) {
        %lane0 = insertelement %i4 undef, int %X, uint 0
        %lane01 = insertelement %i4 %lane0, int %X, uint 1
        %lane012 = insertelement %i4 %lane01, int %X, uint 2
        %splatted = insertelement %i4 %lane012, int %X, uint 3
        %src = load %i4* %Q
        %sum = add %i4 %src, %splatted
        store %i4 %sum, %i4* %P
        ret void
}
35
; Add an all--1 constant vector to the value loaded from %Q.  The -1
; splat immediate should be materialized with a vsplti-class instruction
; on g5 (counted by the 'grep vsplti | count 3' RUN line).  %X is unused.
void %splat_imm_i32(%i4* %P, %i4* %Q, int %X) {
        %src = load %i4* %Q
        %sum = add %i4 %src, <int -1, int -1, int -1, int -1>
        store %i4 %sum, %i4* %P
        ret void
}
42
; Add the constant 65537 (0x00010001) to every lane.  Viewed as eight
; halfwords this is a splat of 1, so the backend can use a halfword
; splat-immediate instead of loading the constant.  %X is unused.
void %splat_imm_i16(%i4* %P, %i4* %Q, int %X) {
        %src = load %i4* %Q
        %sum = add %i4 %src, <int 65537, int 65537, int 65537, int 65537>
        store %i4 %sum, %i4* %P
        ret void
}
49
; Splat the 16-bit scalar into all 8 halfword lanes, then store the
; bytes — expected to select a single vsplth on g5 ('grep vsplth | count 1').
; NOTE(review): the vector %tmp below shadows the short parameter %tmp, and
; %tmp78 is defined twice at different types — presumably legal in this
; pre-2.0 IR where names lived in per-type planes and llvm-upgrade renames
; them; do not "fix" the shadowing or the upgrade path is no longer tested.
void %splat_h(short %tmp, <16 x ubyte>* %dst) {
        %tmp = insertelement <8 x short> undef, short %tmp, uint 0
        %tmp72 = insertelement <8 x short> %tmp, short %tmp, uint 1
        %tmp73 = insertelement <8 x short> %tmp72, short %tmp, uint 2
        %tmp74 = insertelement <8 x short> %tmp73, short %tmp, uint 3
        %tmp75 = insertelement <8 x short> %tmp74, short %tmp, uint 4
        %tmp76 = insertelement <8 x short> %tmp75, short %tmp, uint 5
        %tmp77 = insertelement <8 x short> %tmp76, short %tmp, uint 6
        %tmp78 = insertelement <8 x short> %tmp77, short %tmp, uint 7
        ; Reinterpret the 8 halfwords as 16 bytes for the store.
        %tmp78 = cast <8 x short> %tmp78 to <16 x ubyte>
        store <16 x ubyte> %tmp78, <16 x ubyte>* %dst
        ret void
}
63
; Subtract a constant whose halfword pattern is a splat of 15 from the
; bytes loaded from %B and store to %A.  The constant is written as a
; <8 x short> splat cast to <16 x sbyte>, so the backend can materialize
; it with a vspltish-style splat immediate rather than a constant-pool load.
void %spltish(<16 x ubyte>* %A, <16 x ubyte>* %B) {
        ; Gets converted to 16 x ubyte
        %bytes = load <16 x ubyte>* %B
        %sbytes = cast <16 x ubyte> %bytes to <16 x sbyte>
        %diff = sub <16 x sbyte> %sbytes, cast (<8 x short> < short 15, short 15, short 15, short 15, short 15, short 15, short 15, short 15 > to <16 x sbyte>)
        %diff.u = cast <16 x sbyte> %diff to <16 x ubyte>
        store <16 x ubyte> %diff.u, <16 x ubyte>* %A
        ret void
}
73