blob: efebece09f42126772316fe44d3157901de9c69a [file] [log] [blame]
; RUN: llc < %s -mtriple=powerpc64le-unknown-linux-gnu -mattr=+altivec | FileCheck %s

; Shuffle taking the even bytes of %A || %B lowers to a single vpkuhum.
; On little endian the CHECK expects the vector operands swapped
; ([[REG2]] before [[REG1]]).
define void @VPKUHUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VPKUHUM_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vpkuhum [[REG3:[0-9]+]], [[REG2]], [[REG1]]
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; Same even-byte selection with both shuffle operands equal to %A still
; lowers to vpkuhum.
define void @VPKUHUM_xx(<16 x i8>* %A) {
entry:
; CHECK: VPKUHUM_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
; CHECK: vpkuhum
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}

; Shuffle taking the even halfwords (byte pairs 0-1, 4-5, ...) of %A || %B
; lowers to vpkuwum, with operands swapped for little endian.
define void @VPKUWUM_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VPKUWUM_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 16, i32 17, i32 20, i32 21, i32 24, i32 25, i32 28, i32 29>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vpkuwum [[REG3:[0-9]+]], [[REG2]], [[REG1]]
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; Same even-halfword selection with a single input vector still lowers to
; vpkuwum.
define void @VPKUWUM_xx(<16 x i8>* %A) {
entry:
; CHECK: VPKUWUM_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13, i32 0, i32 1, i32 4, i32 5, i32 8, i32 9, i32 12, i32 13>
; CHECK: vpkuwum
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}

; Interleaving bytes 0-7 of the two inputs lowers to vmrglb on little
; endian (the low-index half is the "low" merge in LE numbering), with the
; operands swapped in the expected output.
define void @VMRGLB_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLB_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrglb [[REG3:[0-9]+]], [[REG2]], [[REG1]]
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; Duplicating bytes 0-7 of a single input still lowers to vmrglb.
define void @VMRGLB_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLB_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 0, i32 1, i32 1, i32 2, i32 2, i32 3, i32 3, i32 4, i32 4, i32 5, i32 5, i32 6, i32 6, i32 7, i32 7>
; CHECK: vmrglb
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}

; Interleaving bytes 8-15 of the two inputs lowers to vmrghb on little
; endian, with the operands swapped in the expected output.
define void @VMRGHB_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHB_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrghb [[REG3:[0-9]+]], [[REG2]], [[REG1]]
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; Duplicating bytes 8-15 of a single input still lowers to vmrghb.
define void @VMRGHB_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHB_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 8, i32 9, i32 9, i32 10, i32 10, i32 11, i32 11, i32 12, i32 12, i32 13, i32 13, i32 14, i32 14, i32 15, i32 15>
; CHECK: vmrghb
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}

; Interleaving halfwords 0-3 (byte pairs) of the two inputs lowers to
; vmrglh on little endian, with the operands swapped in the expected
; output.
define void @VMRGLH_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLH_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 16, i32 17, i32 2, i32 3, i32 18, i32 19, i32 4, i32 5, i32 20, i32 21, i32 6, i32 7, i32 22, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrglh [[REG3:[0-9]+]], [[REG2]], [[REG1]]
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; Duplicating halfwords 0-3 of a single input still lowers to vmrglh.
define void @VMRGLH_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLH_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 0, i32 1, i32 2, i32 3, i32 2, i32 3, i32 4, i32 5, i32 4, i32 5, i32 6, i32 7, i32 6, i32 7>
; CHECK: vmrglh
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}

; Interleaving halfwords 4-7 of the two inputs lowers to vmrghh on little
; endian, with the operands swapped in the expected output.
define void @VMRGHH_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHH_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 24, i32 25, i32 10, i32 11, i32 26, i32 27, i32 12, i32 13, i32 28, i32 29, i32 14, i32 15, i32 30, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrghh [[REG3:[0-9]+]], [[REG2]], [[REG1]]
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; Duplicating halfwords 4-7 of a single input still lowers to vmrghh.
define void @VMRGHH_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHH_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 8, i32 9, i32 10, i32 11, i32 10, i32 11, i32 12, i32 13, i32 12, i32 13, i32 14, i32 15, i32 14, i32 15>
; CHECK: vmrghh
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}

; Interleaving words 0-1 (4-byte groups) of the two inputs lowers to
; vmrglw on little endian, with the operands swapped in the expected
; output.
define void @VMRGLW_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGLW_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 16, i32 17, i32 18, i32 19, i32 4, i32 5, i32 6, i32 7, i32 20, i32 21, i32 22, i32 23>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrglw [[REG3:[0-9]+]], [[REG2]], [[REG1]]
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; Duplicating words 0-1 of a single input still lowers to vmrglw.
define void @VMRGLW_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGLW_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 0, i32 1, i32 2, i32 3, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 4, i32 5, i32 6, i32 7>
; CHECK: vmrglw
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}

; Interleaving words 2-3 of the two inputs lowers to vmrghw on little
; endian, with the operands swapped in the expected output.
define void @VMRGHW_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VMRGHW_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 24, i32 25, i32 26, i32 27, i32 12, i32 13, i32 14, i32 15, i32 28, i32 29, i32 30, i32 31>
; CHECK: lvx [[REG1:[0-9]+]]
; CHECK: lvx [[REG2:[0-9]+]]
; CHECK: vmrghw [[REG3:[0-9]+]], [[REG2]], [[REG1]]
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; Duplicating words 2-3 of a single input still lowers to vmrghw.
define void @VMRGHW_xx(<16 x i8>* %A) {
entry:
; CHECK: VMRGHW_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 8, i32 9, i32 10, i32 11, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 12, i32 13, i32 14, i32 15>
; CHECK: vmrghw
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}

; A shuffle selecting 16 consecutive bytes (12-27) spanning the two
; inputs lowers to vsldoi.
define void @VSLDOI_xy(<16 x i8>* %A, <16 x i8>* %B) {
entry:
; CHECK: VSLDOI_xy:
  %tmp = load <16 x i8>* %A
  %tmp2 = load <16 x i8>* %B
  %tmp3 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp2, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18, i32 19, i32 20, i32 21, i32 22, i32 23, i32 24, i32 25, i32 26, i32 27>
; CHECK: vsldoi
  store <16 x i8> %tmp3, <16 x i8>* %A
  ret void
}

; A rotation of a single input (bytes 12-15 then 0-11) also lowers to
; vsldoi.
define void @VSLDOI_xx(<16 x i8>* %A) {
entry:
; CHECK: VSLDOI_xx:
  %tmp = load <16 x i8>* %A
  %tmp2 = shufflevector <16 x i8> %tmp, <16 x i8> %tmp, <16 x i32> <i32 12, i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11>
; CHECK: vsldoi
  store <16 x i8> %tmp2, <16 x i8>* %A
  ret void
}