; RUN: llc < %s -mtriple=x86_64-apple-darwin10 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 | FileCheck %s
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX
; RUN: llc < %s -mtriple=x86_64-apple-darwin10 -fast-isel -fast-isel-abort=1 -mcpu=corei7-avx | FileCheck %s --check-prefix=AVX

; Test all cmp predicates that can be used with SSE.

define float @select_fcmp_oeq_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_oeq_f32
; CHECK: cmpeqss %xmm1, %xmm0
; CHECK-NEXT: andps %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm0
; CHECK-NEXT: orps %xmm2, %xmm0
; AVX-LABEL: select_fcmp_oeq_f32
; AVX: vcmpeqss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp oeq float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
21
define double @select_fcmp_oeq_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_oeq_f64
; CHECK: cmpeqsd %xmm1, %xmm0
; CHECK-NEXT: andpd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm0
; CHECK-NEXT: orpd %xmm2, %xmm0
; AVX-LABEL: select_fcmp_oeq_f64
; AVX: vcmpeqsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp oeq double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
35
define float @select_fcmp_ogt_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_ogt_f32
; CHECK: cmpltss %xmm0, %xmm1
; CHECK-NEXT: andps %xmm1, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm1
; CHECK-NEXT: orps %xmm2, %xmm1
; AVX-LABEL: select_fcmp_ogt_f32
; AVX: vcmpltss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ogt float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
49
define double @select_fcmp_ogt_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_ogt_f64
; CHECK: cmpltsd %xmm0, %xmm1
; CHECK-NEXT: andpd %xmm1, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm1
; CHECK-NEXT: orpd %xmm2, %xmm1
; AVX-LABEL: select_fcmp_ogt_f64
; AVX: vcmpltsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ogt double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
63
define float @select_fcmp_oge_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_oge_f32
; CHECK: cmpless %xmm0, %xmm1
; CHECK-NEXT: andps %xmm1, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm1
; CHECK-NEXT: orps %xmm2, %xmm1
; AVX-LABEL: select_fcmp_oge_f32
; AVX: vcmpless %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp oge float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
77
define double @select_fcmp_oge_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_oge_f64
; CHECK: cmplesd %xmm0, %xmm1
; CHECK-NEXT: andpd %xmm1, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm1
; CHECK-NEXT: orpd %xmm2, %xmm1
; AVX-LABEL: select_fcmp_oge_f64
; AVX: vcmplesd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp oge double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
91
define float @select_fcmp_olt_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_olt_f32
; CHECK: cmpltss %xmm1, %xmm0
; CHECK-NEXT: andps %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm0
; CHECK-NEXT: orps %xmm2, %xmm0
; AVX-LABEL: select_fcmp_olt_f32
; AVX: vcmpltss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp olt float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
105
define double @select_fcmp_olt_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_olt_f64
; CHECK: cmpltsd %xmm1, %xmm0
; CHECK-NEXT: andpd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm0
; CHECK-NEXT: orpd %xmm2, %xmm0
; AVX-LABEL: select_fcmp_olt_f64
; AVX: vcmpltsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp olt double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
119
define float @select_fcmp_ole_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_ole_f32
; CHECK: cmpless %xmm1, %xmm0
; CHECK-NEXT: andps %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm0
; CHECK-NEXT: orps %xmm2, %xmm0
; AVX-LABEL: select_fcmp_ole_f32
; AVX: vcmpless %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ole float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
133
define double @select_fcmp_ole_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_ole_f64
; CHECK: cmplesd %xmm1, %xmm0
; CHECK-NEXT: andpd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm0
; CHECK-NEXT: orpd %xmm2, %xmm0
; AVX-LABEL: select_fcmp_ole_f64
; AVX: vcmplesd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ole double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
147
define float @select_fcmp_ord_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_ord_f32
; CHECK: cmpordss %xmm1, %xmm0
; CHECK-NEXT: andps %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm0
; CHECK-NEXT: orps %xmm2, %xmm0
; AVX-LABEL: select_fcmp_ord_f32
; AVX: vcmpordss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ord float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
161
define double @select_fcmp_ord_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_ord_f64
; CHECK: cmpordsd %xmm1, %xmm0
; CHECK-NEXT: andpd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm0
; CHECK-NEXT: orpd %xmm2, %xmm0
; AVX-LABEL: select_fcmp_ord_f64
; AVX: vcmpordsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ord double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
175
define float @select_fcmp_uno_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_uno_f32
; CHECK: cmpunordss %xmm1, %xmm0
; CHECK-NEXT: andps %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm0
; CHECK-NEXT: orps %xmm2, %xmm0
; AVX-LABEL: select_fcmp_uno_f32
; AVX: vcmpunordss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp uno float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
189
define double @select_fcmp_uno_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_uno_f64
; CHECK: cmpunordsd %xmm1, %xmm0
; CHECK-NEXT: andpd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm0
; CHECK-NEXT: orpd %xmm2, %xmm0
; AVX-LABEL: select_fcmp_uno_f64
; AVX: vcmpunordsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp uno double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
203
define float @select_fcmp_ugt_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_ugt_f32
; CHECK: cmpnless %xmm1, %xmm0
; CHECK-NEXT: andps %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm0
; CHECK-NEXT: orps %xmm2, %xmm0
; AVX-LABEL: select_fcmp_ugt_f32
; AVX: vcmpnless %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ugt float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
217
define double @select_fcmp_ugt_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_ugt_f64
; CHECK: cmpnlesd %xmm1, %xmm0
; CHECK-NEXT: andpd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm0
; CHECK-NEXT: orpd %xmm2, %xmm0
; AVX-LABEL: select_fcmp_ugt_f64
; AVX: vcmpnlesd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ugt double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
231
define float @select_fcmp_uge_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_uge_f32
; CHECK: cmpnltss %xmm1, %xmm0
; CHECK-NEXT: andps %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm0
; CHECK-NEXT: orps %xmm2, %xmm0
; AVX-LABEL: select_fcmp_uge_f32
; AVX: vcmpnltss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp uge float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
245
define double @select_fcmp_uge_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_uge_f64
; CHECK: cmpnltsd %xmm1, %xmm0
; CHECK-NEXT: andpd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm0
; CHECK-NEXT: orpd %xmm2, %xmm0
; AVX-LABEL: select_fcmp_uge_f64
; AVX: vcmpnltsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp uge double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
259
define float @select_fcmp_ult_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_ult_f32
; CHECK: cmpnless %xmm0, %xmm1
; CHECK-NEXT: andps %xmm1, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm1
; CHECK-NEXT: orps %xmm2, %xmm1
; AVX-LABEL: select_fcmp_ult_f32
; AVX: vcmpnless %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ult float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
273
define double @select_fcmp_ult_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_ult_f64
; CHECK: cmpnlesd %xmm0, %xmm1
; CHECK-NEXT: andpd %xmm1, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm1
; CHECK-NEXT: orpd %xmm2, %xmm1
; AVX-LABEL: select_fcmp_ult_f64
; AVX: vcmpnlesd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ult double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
287
define float @select_fcmp_ule_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_ule_f32
; CHECK: cmpnltss %xmm0, %xmm1
; CHECK-NEXT: andps %xmm1, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm1
; CHECK-NEXT: orps %xmm2, %xmm1
; AVX-LABEL: select_fcmp_ule_f32
; AVX: vcmpnltss %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ule float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
301
define double @select_fcmp_ule_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_ule_f64
; CHECK: cmpnltsd %xmm0, %xmm1
; CHECK-NEXT: andpd %xmm1, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm1
; CHECK-NEXT: orpd %xmm2, %xmm1
; AVX-LABEL: select_fcmp_ule_f64
; AVX: vcmpnltsd %xmm0, %xmm1, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp ule double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}
315
define float @select_fcmp_une_f32(float %a, float %b, float %c, float %d) {
; CHECK-LABEL: select_fcmp_une_f32
; CHECK: cmpneqss %xmm1, %xmm0
; CHECK-NEXT: andps %xmm0, %xmm2
; CHECK-NEXT: andnps %xmm3, %xmm0
; CHECK-NEXT: orps %xmm2, %xmm0
; AVX-LABEL: select_fcmp_une_f32
; AVX: vcmpneqss %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvps %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp une float %a, %b
  %2 = select i1 %1, float %c, float %d
  ret float %2
}
329
define double @select_fcmp_une_f64(double %a, double %b, double %c, double %d) {
; CHECK-LABEL: select_fcmp_une_f64
; CHECK: cmpneqsd %xmm1, %xmm0
; CHECK-NEXT: andpd %xmm0, %xmm2
; CHECK-NEXT: andnpd %xmm3, %xmm0
; CHECK-NEXT: orpd %xmm2, %xmm0
; AVX-LABEL: select_fcmp_une_f64
; AVX: vcmpneqsd %xmm1, %xmm0, %xmm0
; AVX-NEXT: vblendvpd %xmm0, %xmm2, %xmm3, %xmm0
  %1 = fcmp une double %a, %b
  %2 = select i1 %1, double %c, double %d
  ret double %2
}