; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s
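
; Verify that vector shuffles are matched to the expected Altivec pack,
; merge, and shift instructions when targeting little-endian PowerPC64.
; The _xy tests shuffle two distinct loaded vectors; the _xx tests shuffle
; a single loaded vector with itself.

; Selecting the even-numbered bytes of the inputs should be matched to
; vpkuhum (Vector Pack Unsigned Halfword Unsigned Modulo).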
define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VPKUHUM_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK: vpkuhum
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VPKUHUM_xx(<16 x i8>* %A) {
entry:
; CHECK: VPKUHUM_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK: vpkuhum
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}
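
; Selecting the even-numbered halfwords of the inputs should be matched to
; vpkuwum (Vector Pack Unsigned Word Unsigned Modulo).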
define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VPKUWUM_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
; CHECK: vpkuwum
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VPKUWUM_xx(<16 x i8>* %A) {
entry:
; CHECK: VPKUWUM_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
; CHECK: vpkuwum
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}
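
; Interleaving bytes 0-7 of the inputs should be matched to vmrglb
; (Vector Merge Low Byte).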
define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLB_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
; CHECK: vmrglb
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VMRGLB_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLB_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
; CHECK: vmrglb
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}
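
; Interleaving bytes 8-15 of the inputs should be matched to vmrghb
; (Vector Merge High Byte).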
define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHB_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
; CHECK: vmrghb
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VMRGHB_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHB_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
; CHECK: vmrghb
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}
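
; Interleaving halfwords 0-3 of the inputs should be matched to vmrglh
; (Vector Merge Low Halfword).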
define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLH_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
; CHECK: vmrglh
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VMRGLH_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLH_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
; CHECK: vmrglh
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}
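
; Interleaving halfwords 4-7 of the inputs should be matched to vmrghh
; (Vector Merge High Halfword).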
define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHH_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
; CHECK: vmrghh
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VMRGHH_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHH_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
; CHECK: vmrghh
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}
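
; Interleaving words 0-1 of the inputs should be matched to vmrglw
; (Vector Merge Low Word).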
define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLW_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
; CHECK: vmrglw
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VMRGLW_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLW_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
; CHECK: vmrglw
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}
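
; Interleaving words 2-3 of the inputs should be matched to vmrghw
; (Vector Merge High Word).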
define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHW_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
; CHECK: vmrghw
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VMRGHW_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHW_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
; CHECK: vmrghw
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}
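
; These byte shuffles should be matched to vsldoi
; (Vector Shift Left Double by Octet Immediate).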
define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VSLDOI_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
; CHECK: vsldoi
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

define void @VSLDOI_xx(<16 x i8>* %A) {
entry:
; CHECK: VSLDOI_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 3, i32 2, i32 1, i32 0, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4>
; CHECK: vsldoi
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}