; RUN: llc -march=mips -mattr=dsp < %s | FileCheck %s

3; CHECK: select_v2q15_eq_:
4; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
5; CHECK: pick.ph ${{[0-9]+}}, $6, $7
6
7define { i32 } @select_v2q15_eq_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
8entry:
9 %0 = bitcast i32 %a0.coerce to <2 x i16>
10 %1 = bitcast i32 %a1.coerce to <2 x i16>
11 %2 = bitcast i32 %a2.coerce to <2 x i16>
12 %3 = bitcast i32 %a3.coerce to <2 x i16>
13 %cmp = icmp eq <2 x i16> %0, %1
14 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
15 %4 = bitcast <2 x i16> %or to i32
16 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
17 ret { i32 } %.fca.0.insert
18}
19
20; CHECK: select_v2q15_lt_:
21; CHECK: cmp.lt.ph $4, $5
22; CHECK: pick.ph ${{[0-9]+}}, $6, $7
23
24define { i32 } @select_v2q15_lt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
25entry:
26 %0 = bitcast i32 %a0.coerce to <2 x i16>
27 %1 = bitcast i32 %a1.coerce to <2 x i16>
28 %2 = bitcast i32 %a2.coerce to <2 x i16>
29 %3 = bitcast i32 %a3.coerce to <2 x i16>
30 %cmp = icmp slt <2 x i16> %0, %1
31 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
32 %4 = bitcast <2 x i16> %or to i32
33 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
34 ret { i32 } %.fca.0.insert
35}
36
37; CHECK: select_v2q15_le_:
38; CHECK: cmp.le.ph $4, $5
39; CHECK: pick.ph ${{[0-9]+}}, $6, $7
40
41define { i32 } @select_v2q15_le_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
42entry:
43 %0 = bitcast i32 %a0.coerce to <2 x i16>
44 %1 = bitcast i32 %a1.coerce to <2 x i16>
45 %2 = bitcast i32 %a2.coerce to <2 x i16>
46 %3 = bitcast i32 %a3.coerce to <2 x i16>
47 %cmp = icmp sle <2 x i16> %0, %1
48 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
49 %4 = bitcast <2 x i16> %or to i32
50 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
51 ret { i32 } %.fca.0.insert
52}
53
54; CHECK: select_v2q15_ne_:
55; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
56; CHECK: pick.ph ${{[0-9]+}}, $7, $6
57
58define { i32 } @select_v2q15_ne_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
59entry:
60 %0 = bitcast i32 %a0.coerce to <2 x i16>
61 %1 = bitcast i32 %a1.coerce to <2 x i16>
62 %2 = bitcast i32 %a2.coerce to <2 x i16>
63 %3 = bitcast i32 %a3.coerce to <2 x i16>
64 %cmp = icmp ne <2 x i16> %0, %1
65 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
66 %4 = bitcast <2 x i16> %or to i32
67 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
68 ret { i32 } %.fca.0.insert
69}
70
71; CHECK: select_v2q15_gt_:
72; CHECK: cmp.le.ph $4, $5
73; CHECK: pick.ph ${{[0-9]+}}, $7, $6
74
75define { i32 } @select_v2q15_gt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
76entry:
77 %0 = bitcast i32 %a0.coerce to <2 x i16>
78 %1 = bitcast i32 %a1.coerce to <2 x i16>
79 %2 = bitcast i32 %a2.coerce to <2 x i16>
80 %3 = bitcast i32 %a3.coerce to <2 x i16>
81 %cmp = icmp sgt <2 x i16> %0, %1
82 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
83 %4 = bitcast <2 x i16> %or to i32
84 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
85 ret { i32 } %.fca.0.insert
86}
87
88; CHECK: select_v2q15_ge_:
89; CHECK: cmp.lt.ph $4, $5
90; CHECK: pick.ph ${{[0-9]+}}, $7, $6
91
92define { i32 } @select_v2q15_ge_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
93entry:
94 %0 = bitcast i32 %a0.coerce to <2 x i16>
95 %1 = bitcast i32 %a1.coerce to <2 x i16>
96 %2 = bitcast i32 %a2.coerce to <2 x i16>
97 %3 = bitcast i32 %a3.coerce to <2 x i16>
98 %cmp = icmp sge <2 x i16> %0, %1
99 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
100 %4 = bitcast <2 x i16> %or to i32
101 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
102 ret { i32 } %.fca.0.insert
103}
104
105; CHECK: select_v4ui8_eq_:
106; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
107; CHECK: pick.qb ${{[0-9]+}}, $6, $7
108
109define { i32 } @select_v4ui8_eq_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
110entry:
111 %0 = bitcast i32 %a0.coerce to <4 x i8>
112 %1 = bitcast i32 %a1.coerce to <4 x i8>
113 %2 = bitcast i32 %a2.coerce to <4 x i8>
114 %3 = bitcast i32 %a3.coerce to <4 x i8>
115 %cmp = icmp eq <4 x i8> %0, %1
116 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
117 %4 = bitcast <4 x i8> %or to i32
118 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
119 ret { i32 } %.fca.0.insert
120}
121
122; CHECK: select_v4ui8_lt_:
123; CHECK: cmpu.lt.qb $4, $5
124; CHECK: pick.qb ${{[0-9]+}}, $6, $7
125
126define { i32 } @select_v4ui8_lt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
127entry:
128 %0 = bitcast i32 %a0.coerce to <4 x i8>
129 %1 = bitcast i32 %a1.coerce to <4 x i8>
130 %2 = bitcast i32 %a2.coerce to <4 x i8>
131 %3 = bitcast i32 %a3.coerce to <4 x i8>
132 %cmp = icmp ult <4 x i8> %0, %1
133 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
134 %4 = bitcast <4 x i8> %or to i32
135 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
136 ret { i32 } %.fca.0.insert
137}
138
139; CHECK: select_v4ui8_le_:
140; CHECK: cmpu.le.qb $4, $5
141; CHECK: pick.qb ${{[0-9]+}}, $6, $7
142
143define { i32 } @select_v4ui8_le_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
144entry:
145 %0 = bitcast i32 %a0.coerce to <4 x i8>
146 %1 = bitcast i32 %a1.coerce to <4 x i8>
147 %2 = bitcast i32 %a2.coerce to <4 x i8>
148 %3 = bitcast i32 %a3.coerce to <4 x i8>
149 %cmp = icmp ule <4 x i8> %0, %1
150 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
151 %4 = bitcast <4 x i8> %or to i32
152 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
153 ret { i32 } %.fca.0.insert
154}
155
156; CHECK: select_v4ui8_ne_:
157; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
158; CHECK: pick.qb ${{[0-9]+}}, $7, $6
159
160define { i32 } @select_v4ui8_ne_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
161entry:
162 %0 = bitcast i32 %a0.coerce to <4 x i8>
163 %1 = bitcast i32 %a1.coerce to <4 x i8>
164 %2 = bitcast i32 %a2.coerce to <4 x i8>
165 %3 = bitcast i32 %a3.coerce to <4 x i8>
166 %cmp = icmp ne <4 x i8> %0, %1
167 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
168 %4 = bitcast <4 x i8> %or to i32
169 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
170 ret { i32 } %.fca.0.insert
171}
172
173; CHECK: select_v4ui8_gt_:
174; CHECK: cmpu.le.qb $4, $5
175; CHECK: pick.qb ${{[0-9]+}}, $7, $6
176
177define { i32 } @select_v4ui8_gt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
178entry:
179 %0 = bitcast i32 %a0.coerce to <4 x i8>
180 %1 = bitcast i32 %a1.coerce to <4 x i8>
181 %2 = bitcast i32 %a2.coerce to <4 x i8>
182 %3 = bitcast i32 %a3.coerce to <4 x i8>
183 %cmp = icmp ugt <4 x i8> %0, %1
184 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
185 %4 = bitcast <4 x i8> %or to i32
186 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
187 ret { i32 } %.fca.0.insert
188}
189
190; CHECK: select_v4ui8_ge_:
191; CHECK: cmpu.lt.qb $4, $5
192; CHECK: pick.qb ${{[0-9]+}}, $7, $6
193
194define { i32 } @select_v4ui8_ge_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
195entry:
196 %0 = bitcast i32 %a0.coerce to <4 x i8>
197 %1 = bitcast i32 %a1.coerce to <4 x i8>
198 %2 = bitcast i32 %a2.coerce to <4 x i8>
199 %3 = bitcast i32 %a3.coerce to <4 x i8>
200 %cmp = icmp uge <4 x i8> %0, %1
201 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
202 %4 = bitcast <4 x i8> %or to i32
203 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
204 ret { i32 } %.fca.0.insert
205}
206
207; CHECK: select_v2ui16_lt_:
208; CHECK-NOT: cmp
209; CHECK-NOT: pick
210
211define { i32 } @select_v2ui16_lt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
212entry:
213 %0 = bitcast i32 %a0.coerce to <2 x i16>
214 %1 = bitcast i32 %a1.coerce to <2 x i16>
215 %2 = bitcast i32 %a2.coerce to <2 x i16>
216 %3 = bitcast i32 %a3.coerce to <2 x i16>
217 %cmp = icmp ult <2 x i16> %0, %1
218 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
219 %4 = bitcast <2 x i16> %or to i32
220 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
221 ret { i32 } %.fca.0.insert
222}
223
224; CHECK: select_v2ui16_le_:
225; CHECK-NOT: cmp
226; CHECK-NOT: pick
227
228define { i32 } @select_v2ui16_le_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
229entry:
230 %0 = bitcast i32 %a0.coerce to <2 x i16>
231 %1 = bitcast i32 %a1.coerce to <2 x i16>
232 %2 = bitcast i32 %a2.coerce to <2 x i16>
233 %3 = bitcast i32 %a3.coerce to <2 x i16>
234 %cmp = icmp ule <2 x i16> %0, %1
235 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
236 %4 = bitcast <2 x i16> %or to i32
237 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
238 ret { i32 } %.fca.0.insert
239}
240
241; CHECK: select_v2ui16_gt_:
242; CHECK-NOT: cmp
243; CHECK-NOT: pick
244
245define { i32 } @select_v2ui16_gt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
246entry:
247 %0 = bitcast i32 %a0.coerce to <2 x i16>
248 %1 = bitcast i32 %a1.coerce to <2 x i16>
249 %2 = bitcast i32 %a2.coerce to <2 x i16>
250 %3 = bitcast i32 %a3.coerce to <2 x i16>
251 %cmp = icmp ugt <2 x i16> %0, %1
252 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
253 %4 = bitcast <2 x i16> %or to i32
254 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
255 ret { i32 } %.fca.0.insert
256}
257
258; CHECK: select_v2ui16_ge_:
259; CHECK-NOT: cmp
260; CHECK-NOT: pick
261
262define { i32 } @select_v2ui16_ge_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
263entry:
264 %0 = bitcast i32 %a0.coerce to <2 x i16>
265 %1 = bitcast i32 %a1.coerce to <2 x i16>
266 %2 = bitcast i32 %a2.coerce to <2 x i16>
267 %3 = bitcast i32 %a3.coerce to <2 x i16>
268 %cmp = icmp uge <2 x i16> %0, %1
269 %or = select <2 x i1> %cmp, <2 x i16> %2, <2 x i16> %3
270 %4 = bitcast <2 x i16> %or to i32
271 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
272 ret { i32 } %.fca.0.insert
273}
274
275; CHECK: select_v4i8_lt_:
276; CHECK-NOT: cmp
277; CHECK-NOT: pick
278
279define { i32 } @select_v4i8_lt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
280entry:
281 %0 = bitcast i32 %a0.coerce to <4 x i8>
282 %1 = bitcast i32 %a1.coerce to <4 x i8>
283 %2 = bitcast i32 %a2.coerce to <4 x i8>
284 %3 = bitcast i32 %a3.coerce to <4 x i8>
285 %cmp = icmp slt <4 x i8> %0, %1
286 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
287 %4 = bitcast <4 x i8> %or to i32
288 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
289 ret { i32 } %.fca.0.insert
290}
291
292; CHECK: select_v4i8_le_:
293; CHECK-NOT: cmp
294; CHECK-NOT: pick
295
296define { i32 } @select_v4i8_le_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
297entry:
298 %0 = bitcast i32 %a0.coerce to <4 x i8>
299 %1 = bitcast i32 %a1.coerce to <4 x i8>
300 %2 = bitcast i32 %a2.coerce to <4 x i8>
301 %3 = bitcast i32 %a3.coerce to <4 x i8>
302 %cmp = icmp sle <4 x i8> %0, %1
303 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
304 %4 = bitcast <4 x i8> %or to i32
305 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
306 ret { i32 } %.fca.0.insert
307}
308
309; CHECK: select_v4i8_gt_:
310; CHECK-NOT: cmp
311; CHECK-NOT: pick
312
313define { i32 } @select_v4i8_gt_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
314entry:
315 %0 = bitcast i32 %a0.coerce to <4 x i8>
316 %1 = bitcast i32 %a1.coerce to <4 x i8>
317 %2 = bitcast i32 %a2.coerce to <4 x i8>
318 %3 = bitcast i32 %a3.coerce to <4 x i8>
319 %cmp = icmp sgt <4 x i8> %0, %1
320 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
321 %4 = bitcast <4 x i8> %or to i32
322 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
323 ret { i32 } %.fca.0.insert
324}
325
326; CHECK: select_v4i8_ge_:
327; CHECK-NOT: cmp
328; CHECK-NOT: pick
329
330define { i32 } @select_v4i8_ge_(i32 %a0.coerce, i32 %a1.coerce, i32 %a2.coerce, i32 %a3.coerce) {
331entry:
332 %0 = bitcast i32 %a0.coerce to <4 x i8>
333 %1 = bitcast i32 %a1.coerce to <4 x i8>
334 %2 = bitcast i32 %a2.coerce to <4 x i8>
335 %3 = bitcast i32 %a3.coerce to <4 x i8>
336 %cmp = icmp sge <4 x i8> %0, %1
337 %or = select <4 x i1> %cmp, <4 x i8> %2, <4 x i8> %3
338 %4 = bitcast <4 x i8> %or to i32
339 %.fca.0.insert = insertvalue { i32 } undef, i32 %4, 0
340 ret { i32 } %.fca.0.insert
341}
342
343; CHECK: compare_v2q15_eq_:
344; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
345; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
346
347define { i32 } @compare_v2q15_eq_(i32 %a0.coerce, i32 %a1.coerce) {
348entry:
349 %0 = bitcast i32 %a0.coerce to <2 x i16>
350 %1 = bitcast i32 %a1.coerce to <2 x i16>
351 %cmp = icmp eq <2 x i16> %0, %1
352 %sext = sext <2 x i1> %cmp to <2 x i16>
353 %2 = bitcast <2 x i16> %sext to i32
354 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
355 ret { i32 } %.fca.0.insert
356}
357
358; CHECK: compare_v2q15_lt_:
359; CHECK: cmp.lt.ph $4, $5
360; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
361
362define { i32 } @compare_v2q15_lt_(i32 %a0.coerce, i32 %a1.coerce) {
363entry:
364 %0 = bitcast i32 %a0.coerce to <2 x i16>
365 %1 = bitcast i32 %a1.coerce to <2 x i16>
366 %cmp = icmp slt <2 x i16> %0, %1
367 %sext = sext <2 x i1> %cmp to <2 x i16>
368 %2 = bitcast <2 x i16> %sext to i32
369 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
370 ret { i32 } %.fca.0.insert
371}
372
373; CHECK: compare_v2q15_le_:
374; CHECK: cmp.le.ph $4, $5
375; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
376
377define { i32 } @compare_v2q15_le_(i32 %a0.coerce, i32 %a1.coerce) {
378entry:
379 %0 = bitcast i32 %a0.coerce to <2 x i16>
380 %1 = bitcast i32 %a1.coerce to <2 x i16>
381 %cmp = icmp sle <2 x i16> %0, %1
382 %sext = sext <2 x i1> %cmp to <2 x i16>
383 %2 = bitcast <2 x i16> %sext to i32
384 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
385 ret { i32 } %.fca.0.insert
386}
387
388; CHECK: compare_v2q15_ne_:
389; CHECK: cmp.eq.ph ${{[0-9]+}}, ${{[0-9]+}}
390; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
391
392define { i32 } @compare_v2q15_ne_(i32 %a0.coerce, i32 %a1.coerce) {
393entry:
394 %0 = bitcast i32 %a0.coerce to <2 x i16>
395 %1 = bitcast i32 %a1.coerce to <2 x i16>
396 %cmp = icmp ne <2 x i16> %0, %1
397 %sext = sext <2 x i1> %cmp to <2 x i16>
398 %2 = bitcast <2 x i16> %sext to i32
399 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
400 ret { i32 } %.fca.0.insert
401}
402
403; CHECK: compare_v2q15_gt_:
404; CHECK: cmp.le.ph $4, $5
405; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
406
407define { i32 } @compare_v2q15_gt_(i32 %a0.coerce, i32 %a1.coerce) {
408entry:
409 %0 = bitcast i32 %a0.coerce to <2 x i16>
410 %1 = bitcast i32 %a1.coerce to <2 x i16>
411 %cmp = icmp sgt <2 x i16> %0, %1
412 %sext = sext <2 x i1> %cmp to <2 x i16>
413 %2 = bitcast <2 x i16> %sext to i32
414 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
415 ret { i32 } %.fca.0.insert
416}
417
418; CHECK: compare_v2q15_ge_:
419; CHECK: cmp.lt.ph $4, $5
420; CHECK: pick.ph ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
421
422define { i32 } @compare_v2q15_ge_(i32 %a0.coerce, i32 %a1.coerce) {
423entry:
424 %0 = bitcast i32 %a0.coerce to <2 x i16>
425 %1 = bitcast i32 %a1.coerce to <2 x i16>
426 %cmp = icmp sge <2 x i16> %0, %1
427 %sext = sext <2 x i1> %cmp to <2 x i16>
428 %2 = bitcast <2 x i16> %sext to i32
429 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
430 ret { i32 } %.fca.0.insert
431}
432
433; CHECK: compare_v4ui8_eq_:
434; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
435; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
436
437define { i32 } @compare_v4ui8_eq_(i32 %a0.coerce, i32 %a1.coerce) {
438entry:
439 %0 = bitcast i32 %a0.coerce to <4 x i8>
440 %1 = bitcast i32 %a1.coerce to <4 x i8>
441 %cmp = icmp eq <4 x i8> %0, %1
442 %sext = sext <4 x i1> %cmp to <4 x i8>
443 %2 = bitcast <4 x i8> %sext to i32
444 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
445 ret { i32 } %.fca.0.insert
446}
447
448; CHECK: compare_v4ui8_lt_:
449; CHECK: cmpu.lt.qb $4, $5
450; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
451
452define { i32 } @compare_v4ui8_lt_(i32 %a0.coerce, i32 %a1.coerce) {
453entry:
454 %0 = bitcast i32 %a0.coerce to <4 x i8>
455 %1 = bitcast i32 %a1.coerce to <4 x i8>
456 %cmp = icmp ult <4 x i8> %0, %1
457 %sext = sext <4 x i1> %cmp to <4 x i8>
458 %2 = bitcast <4 x i8> %sext to i32
459 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
460 ret { i32 } %.fca.0.insert
461}
462
463; CHECK: compare_v4ui8_le_:
464; CHECK: cmpu.le.qb $4, $5
465; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
466
467define { i32 } @compare_v4ui8_le_(i32 %a0.coerce, i32 %a1.coerce) {
468entry:
469 %0 = bitcast i32 %a0.coerce to <4 x i8>
470 %1 = bitcast i32 %a1.coerce to <4 x i8>
471 %cmp = icmp ule <4 x i8> %0, %1
472 %sext = sext <4 x i1> %cmp to <4 x i8>
473 %2 = bitcast <4 x i8> %sext to i32
474 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
475 ret { i32 } %.fca.0.insert
476}
477
478; CHECK: compare_v4ui8_ne_:
479; CHECK: cmpu.eq.qb ${{[0-9]+}}, ${{[0-9]+}}
480; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
481
482define { i32 } @compare_v4ui8_ne_(i32 %a0.coerce, i32 %a1.coerce) {
483entry:
484 %0 = bitcast i32 %a0.coerce to <4 x i8>
485 %1 = bitcast i32 %a1.coerce to <4 x i8>
486 %cmp = icmp ne <4 x i8> %0, %1
487 %sext = sext <4 x i1> %cmp to <4 x i8>
488 %2 = bitcast <4 x i8> %sext to i32
489 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
490 ret { i32 } %.fca.0.insert
491}
492
493; CHECK: compare_v4ui8_gt_:
494; CHECK: cmpu.le.qb $4, $5
495; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
496
497define { i32 } @compare_v4ui8_gt_(i32 %a0.coerce, i32 %a1.coerce) {
498entry:
499 %0 = bitcast i32 %a0.coerce to <4 x i8>
500 %1 = bitcast i32 %a1.coerce to <4 x i8>
501 %cmp = icmp ugt <4 x i8> %0, %1
502 %sext = sext <4 x i1> %cmp to <4 x i8>
503 %2 = bitcast <4 x i8> %sext to i32
504 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
505 ret { i32 } %.fca.0.insert
506}
507
508; CHECK: compare_v4ui8_ge_:
509; CHECK: cmpu.lt.qb $4, $5
510; CHECK: pick.qb ${{[0-9]+}}, ${{[a-z0-9]+}}, ${{[a-z0-9]+}}
511
512define { i32 } @compare_v4ui8_ge_(i32 %a0.coerce, i32 %a1.coerce) {
513entry:
514 %0 = bitcast i32 %a0.coerce to <4 x i8>
515 %1 = bitcast i32 %a1.coerce to <4 x i8>
516 %cmp = icmp uge <4 x i8> %0, %1
517 %sext = sext <4 x i1> %cmp to <4 x i8>
518 %2 = bitcast <4 x i8> %sext to i32
519 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
520 ret { i32 } %.fca.0.insert
521}
522
523; CHECK: compare_v2ui16_lt_:
524; CHECK-NOT: cmp
525; CHECK-NOT: pick
526
527define { i32 } @compare_v2ui16_lt_(i32 %a0.coerce, i32 %a1.coerce) {
528entry:
529 %0 = bitcast i32 %a0.coerce to <2 x i16>
530 %1 = bitcast i32 %a1.coerce to <2 x i16>
531 %cmp = icmp ult <2 x i16> %0, %1
532 %sext = sext <2 x i1> %cmp to <2 x i16>
533 %2 = bitcast <2 x i16> %sext to i32
534 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
535 ret { i32 } %.fca.0.insert
536}
537
538; CHECK: compare_v2ui16_le_:
539; CHECK-NOT: cmp
540; CHECK-NOT: pick
541
542define { i32 } @compare_v2ui16_le_(i32 %a0.coerce, i32 %a1.coerce) {
543entry:
544 %0 = bitcast i32 %a0.coerce to <2 x i16>
545 %1 = bitcast i32 %a1.coerce to <2 x i16>
546 %cmp = icmp ule <2 x i16> %0, %1
547 %sext = sext <2 x i1> %cmp to <2 x i16>
548 %2 = bitcast <2 x i16> %sext to i32
549 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
550 ret { i32 } %.fca.0.insert
551}
552
553; CHECK: compare_v2ui16_gt_:
554; CHECK-NOT: cmp
555; CHECK-NOT: pick
556
557define { i32 } @compare_v2ui16_gt_(i32 %a0.coerce, i32 %a1.coerce) {
558entry:
559 %0 = bitcast i32 %a0.coerce to <2 x i16>
560 %1 = bitcast i32 %a1.coerce to <2 x i16>
561 %cmp = icmp ugt <2 x i16> %0, %1
562 %sext = sext <2 x i1> %cmp to <2 x i16>
563 %2 = bitcast <2 x i16> %sext to i32
564 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
565 ret { i32 } %.fca.0.insert
566}
567
568; CHECK: compare_v2ui16_ge_:
569; CHECK-NOT: cmp
570; CHECK-NOT: pick
571
572define { i32 } @compare_v2ui16_ge_(i32 %a0.coerce, i32 %a1.coerce) {
573entry:
574 %0 = bitcast i32 %a0.coerce to <2 x i16>
575 %1 = bitcast i32 %a1.coerce to <2 x i16>
576 %cmp = icmp uge <2 x i16> %0, %1
577 %sext = sext <2 x i1> %cmp to <2 x i16>
578 %2 = bitcast <2 x i16> %sext to i32
579 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
580 ret { i32 } %.fca.0.insert
581}
582
583; CHECK: compare_v4i8_lt_:
584; CHECK-NOT: cmp
585; CHECK-NOT: pick
586
587define { i32 } @compare_v4i8_lt_(i32 %a0.coerce, i32 %a1.coerce) {
588entry:
589 %0 = bitcast i32 %a0.coerce to <4 x i8>
590 %1 = bitcast i32 %a1.coerce to <4 x i8>
591 %cmp = icmp slt <4 x i8> %0, %1
592 %sext = sext <4 x i1> %cmp to <4 x i8>
593 %2 = bitcast <4 x i8> %sext to i32
594 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
595 ret { i32 } %.fca.0.insert
596}
597
598; CHECK: compare_v4i8_le_:
599; CHECK-NOT: cmp
600; CHECK-NOT: pick
601
602define { i32 } @compare_v4i8_le_(i32 %a0.coerce, i32 %a1.coerce) {
603entry:
604 %0 = bitcast i32 %a0.coerce to <4 x i8>
605 %1 = bitcast i32 %a1.coerce to <4 x i8>
606 %cmp = icmp sle <4 x i8> %0, %1
607 %sext = sext <4 x i1> %cmp to <4 x i8>
608 %2 = bitcast <4 x i8> %sext to i32
609 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
610 ret { i32 } %.fca.0.insert
611}
612
613; CHECK: compare_v4i8_gt_:
614; CHECK-NOT: cmp
615; CHECK-NOT: pick
616
617define { i32 } @compare_v4i8_gt_(i32 %a0.coerce, i32 %a1.coerce) {
618entry:
619 %0 = bitcast i32 %a0.coerce to <4 x i8>
620 %1 = bitcast i32 %a1.coerce to <4 x i8>
621 %cmp = icmp sgt <4 x i8> %0, %1
622 %sext = sext <4 x i1> %cmp to <4 x i8>
623 %2 = bitcast <4 x i8> %sext to i32
624 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
625 ret { i32 } %.fca.0.insert
626}
627
628; CHECK: compare_v4i8_ge_:
629; CHECK-NOT: cmp
630; CHECK-NOT: pick
631
632define { i32 } @compare_v4i8_ge_(i32 %a0.coerce, i32 %a1.coerce) {
633entry:
634 %0 = bitcast i32 %a0.coerce to <4 x i8>
635 %1 = bitcast i32 %a1.coerce to <4 x i8>
636 %cmp = icmp sge <4 x i8> %0, %1
637 %sext = sext <4 x i1> %cmp to <4 x i8>
638 %2 = bitcast <4 x i8> %sext to i32
639 %.fca.0.insert = insertvalue { i32 } undef, i32 %2, 0
640 ret { i32 } %.fca.0.insert
641}