; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -codegenprepare -S < %s | FileCheck %s
; RUN: opt -enable-debugify -codegenprepare -S < %s 2>&1 | FileCheck %s -check-prefix=DEBUG

target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
target triple = "x86_64-apple-darwin10.0.0"

; add + (icmp ult sum, op0) is matched into llvm.uadd.with.overflow;
; the select then uses the overflow bit.
define i64 @uaddo1(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo1(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %a
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; Same as @uaddo1, but the compare is against the other addend (op1).
define i64 @uaddo2(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo2(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ult i64 %add, %b
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; Commuted compare (icmp ugt op, sum) is also recognized.
define i64 @uaddo3(i64 %a, i64 %b) nounwind ssp {
; CHECK-LABEL: @uaddo3(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[B:%.*]], i64 [[A:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: [[Q:%.*]] = select i1 [[OV]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
;
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q
}

; TODO? CGP sinks the compare before we have a chance to form the overflow intrinsic.

define i64 @uaddo4(i64 %a, i64 %b, i1 %c) nounwind ssp {
; CHECK-LABEL: @uaddo4(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[NEXT:%.*]], label [[EXIT:%.*]]
; CHECK: next:
; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt i64 [[B]], [[ADD]]
; CHECK-NEXT: [[Q:%.*]] = select i1 [[TMP0]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
; CHECK: exit:
; CHECK-NEXT: ret i64 0
;
entry:
  %add = add i64 %b, %a
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit

next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q

exit:
  ret i64 0
}

; Like @uaddo4, but the add result is also stored; no intrinsic is formed
; across the blocks (see CHECK lines).
define i64 @uaddo5(i64 %a, i64 %b, i64* %ptr, i1 %c) nounwind ssp {
; CHECK-LABEL: @uaddo5(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[ADD:%.*]] = add i64 [[B:%.*]], [[A:%.*]]
; CHECK-NEXT: store i64 [[ADD]], i64* [[PTR:%.*]]
; CHECK-NEXT: br i1 [[C:%.*]], label [[NEXT:%.*]], label [[EXIT:%.*]]
; CHECK: next:
; CHECK-NEXT: [[TMP0:%.*]] = icmp ugt i64 [[B]], [[ADD]]
; CHECK-NEXT: [[Q:%.*]] = select i1 [[TMP0]], i64 [[B]], i64 42
; CHECK-NEXT: ret i64 [[Q]]
; CHECK: exit:
; CHECK-NEXT: ret i64 0
;
entry:
  %add = add i64 %b, %a
  store i64 %add, i64* %ptr
  %cmp = icmp ugt i64 %b, %add
  br i1 %c, label %next, label %exit

next:
  %Q = select i1 %cmp, i64 %b, i64 42
  ret i64 %Q

exit:
  ret i64 0
}

; When adding 1, the general pattern for add-overflow may be different due to icmp canonicalization.
; PR31754: https://bugs.llvm.org/show_bug.cgi?id=31754

define i1 @uaddo_i64_increment(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i64 %x, 1
  %ov = icmp eq i64 %a, 0
  store i64 %a, i64* %p
  ret i1 %ov
}

; Commuted add operands (constant first) are still matched.
define i1 @uaddo_i8_increment_noncanonical_1(i8 %x, i8* %p) {
; CHECK-LABEL: @uaddo_i8_increment_noncanonical_1(
; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.uadd.with.overflow.i8(i8 1, i8 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT: store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i8 1, %x ; commute
  %ov = icmp eq i8 %a, 0
  store i8 %a, i8* %p
  ret i1 %ov
}

; Commuted icmp operands (zero first) are still matched.
define i1 @uaddo_i32_increment_noncanonical_2(i32 %x, i32* %p) {
; CHECK-LABEL: @uaddo_i32_increment_noncanonical_2(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT: store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i32 %x, 1
  %ov = icmp eq i32 0, %a ; commute
  store i32 %a, i32* %p
  ret i1 %ov
}

; Both the add and the icmp commuted at once are still matched.
define i1 @uaddo_i16_increment_noncanonical_3(i16 %x, i16* %p) {
; CHECK-LABEL: @uaddo_i16_increment_noncanonical_3(
; CHECK-NEXT: [[TMP1:%.*]] = call { i16, i1 } @llvm.uadd.with.overflow.i16(i16 1, i16 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT: store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i16 1, %x ; commute
  %ov = icmp eq i16 0, %a ; commute
  store i16 %a, i16* %p
  ret i1 %ov
}

; The overflow check may be against the input rather than the sum.

define i1 @uaddo_i64_increment_alt(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment_alt(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i64 %x, 1
  store i64 %a, i64* %p
  %ov = icmp eq i64 %x, -1
  ret i1 %ov
}

; Make sure insertion is done correctly based on dominance.

define i1 @uaddo_i64_increment_alt_dom(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_increment_alt_dom(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %ov = icmp eq i64 %x, -1
  %a = add i64 %x, 1
  store i64 %a, i64* %p
  ret i1 %ov
}

; The overflow check may be against the input rather than the sum.

define i1 @uaddo_i64_decrement_alt(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_decrement_alt(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 -1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %a = add i64 %x, -1
  store i64 %a, i64* %p
  %ov = icmp ne i64 %x, 0
  ret i1 %ov
}

; Make sure insertion is done correctly based on dominance.

define i1 @uaddo_i64_decrement_alt_dom(i64 %x, i64* %p) {
; CHECK-LABEL: @uaddo_i64_decrement_alt_dom(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 [[X:%.*]], i64 -1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %ov = icmp ne i64 %x, 0
  %a = add i64 %x, -1
  store i64 %a, i64* %p
  ret i1 %ov
}

; No transform for illegal types.

define i1 @uaddo_i42_increment_illegal_type(i42 %x, i42* %p) {
; CHECK-LABEL: @uaddo_i42_increment_illegal_type(
; CHECK-NEXT: [[A:%.*]] = add i42 [[X:%.*]], 1
; CHECK-NEXT: [[OV:%.*]] = icmp eq i42 [[A]], 0
; CHECK-NEXT: store i42 [[A]], i42* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV]]
;
  %a = add i42 %x, 1
  %ov = icmp eq i42 %a, 0
  store i42 %a, i42* %p
  ret i1 %ov
}

; sub + (icmp ult x, y) is matched into llvm.usub.with.overflow.
define i1 @usubo_ult_i64(i64 %x, i64 %y, i64* %p) {
; CHECK-LABEL: @usubo_ult_i64(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X:%.*]], i64 [[Y:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; Verify insertion point for single-BB. Toggle predicate.

define i1 @usubo_ugt_i32(i32 %x, i32 %y, i32* %p) {
; CHECK-LABEL: @usubo_ugt_i32(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[X:%.*]], i32 [[Y:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT: store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %ov = icmp ugt i32 %y, %x
  %s = sub i32 %x, %y
  store i32 %s, i32* %p
  ret i1 %ov
}

; Constant operand should match.

define i1 @usubo_ugt_constant_op0_i8(i8 %x, i8* %p) {
; CHECK-LABEL: @usubo_ugt_constant_op0_i8(
; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 42, i8 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT: store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = sub i8 42, %x
  %ov = icmp ugt i8 %x, 42
  store i8 %s, i8* %p
  ret i1 %ov
}

; Compare with constant operand 0 is canonicalized by commuting, but verify match for non-canonical form.

define i1 @usubo_ult_constant_op0_i16(i16 %x, i16* %p) {
; CHECK-LABEL: @usubo_ult_constant_op0_i16(
; CHECK-NEXT: [[TMP1:%.*]] = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 43, i16 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT: store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = sub i16 43, %x
  %ov = icmp ult i16 43, %x
  store i16 %s, i16* %p
  ret i1 %ov
}

; Subtract with constant operand 1 is canonicalized to add.

define i1 @usubo_ult_constant_op1_i16(i16 %x, i16* %p) {
; CHECK-LABEL: @usubo_ult_constant_op1_i16(
; CHECK-NEXT: [[TMP1:%.*]] = call { i16, i1 } @llvm.usub.with.overflow.i16(i16 [[X:%.*]], i16 44)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i16, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i16, i1 } [[TMP1]], 1
; CHECK-NEXT: store i16 [[MATH]], i16* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = add i16 %x, -44
  %ov = icmp ult i16 %x, 44
  store i16 %s, i16* %p
  ret i1 %ov
}

; As above, with the compare toggled (ugt, constant on the left).
define i1 @usubo_ugt_constant_op1_i8(i8 %x, i8* %p) {
; CHECK-LABEL: @usubo_ugt_constant_op1_i8(
; CHECK-NEXT: [[TMP1:%.*]] = call { i8, i1 } @llvm.usub.with.overflow.i8(i8 [[X:%.*]], i8 45)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i8, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i8, i1 } [[TMP1]], 1
; CHECK-NEXT: store i8 [[MATH]], i8* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %ov = icmp ugt i8 45, %x
  %s = add i8 %x, -45
  store i8 %s, i8* %p
  ret i1 %ov
}

; Special-case: subtract 1 changes the compare predicate and constant.

define i1 @usubo_eq_constant1_op1_i32(i32 %x, i32* %p) {
; CHECK-LABEL: @usubo_eq_constant1_op1_i32(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 [[X:%.*]], i32 1)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT: store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = add i32 %x, -1
  %ov = icmp eq i32 %x, 0
  store i32 %s, i32* %p
  ret i1 %ov
}

; Special-case: subtract from 0 (negate) changes the compare predicate.

define i1 @usubo_ne_constant0_op1_i32(i32 %x, i32* %p) {
; CHECK-LABEL: @usubo_ne_constant0_op1_i32(
; CHECK-NEXT: [[TMP1:%.*]] = call { i32, i1 } @llvm.usub.with.overflow.i32(i32 0, i32 [[X:%.*]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i32, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i32, i1 } [[TMP1]], 1
; CHECK-NEXT: store i32 [[MATH]], i32* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
  %s = sub i32 0, %x
  %ov = icmp ne i32 %x, 0
  store i32 %s, i32* %p
  ret i1 %ov
}

; This used to verify insertion point for multi-BB, but now we just bail out.

declare void @call(i1)

define i1 @usubo_ult_sub_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) {
; CHECK-LABEL: @usubo_ult_sub_dominates_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: [[S:%.*]] = sub i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: store i64 [[S]], i64* [[P:%.*]]
; CHECK-NEXT: br i1 [[COND]], label [[END:%.*]], label [[F]]
; CHECK: f:
; CHECK-NEXT: ret i1 [[COND]]
; CHECK: end:
; CHECK-NEXT: [[OV:%.*]] = icmp ult i64 [[X]], [[Y]]
; CHECK-NEXT: ret i1 [[OV]]
;
entry:
  br i1 %cond, label %t, label %f

t:
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  br i1 %cond, label %end, label %f

f:
  ret i1 %cond

end:
  %ov = icmp ult i64 %x, %y
  ret i1 %ov
}

; The compare dominates the subtract; the intrinsic is formed in the end block.
define i1 @usubo_ult_cmp_dominates_i64(i64 %x, i64 %y, i64* %p, i1 %cond) {
; CHECK-LABEL: @usubo_ult_cmp_dominates_i64(
; CHECK-NEXT: entry:
; CHECK-NEXT: br i1 [[COND:%.*]], label [[T:%.*]], label [[F:%.*]]
; CHECK: t:
; CHECK-NEXT: [[OV:%.*]] = icmp ult i64 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT: call void @call(i1 [[OV]])
; CHECK-NEXT: br i1 [[OV]], label [[END:%.*]], label [[F]]
; CHECK: f:
; CHECK-NEXT: ret i1 [[COND]]
; CHECK: end:
; CHECK-NEXT: [[TMP0:%.*]] = call { i64, i1 } @llvm.usub.with.overflow.i64(i64 [[X]], i64 [[Y]])
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP0]], 0
; CHECK-NEXT: [[OV1:%.*]] = extractvalue { i64, i1 } [[TMP0]], 1
; CHECK-NEXT: store i64 [[MATH]], i64* [[P:%.*]]
; CHECK-NEXT: ret i1 [[OV1]]
;
entry:
  br i1 %cond, label %t, label %f

t:
  %ov = icmp ult i64 %x, %y
  call void @call(i1 %ov)
  br i1 %ov, label %end, label %f

f:
  ret i1 %cond

end:
  %s = sub i64 %x, %y
  store i64 %s, i64* %p
  ret i1 %ov
}

; Verify that crazy/non-canonical code does not crash.

define void @bar() {
; CHECK-LABEL: @bar(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 1, -1
; CHECK-NEXT: [[FROMBOOL:%.*]] = zext i1 [[CMP]] to i8
; CHECK-NEXT: unreachable
;
  %cmp = icmp eq i64 1, -1
  %frombool = zext i1 %cmp to i8
  unreachable
}

; Constant-operand add is left unchanged (no crash; see CHECK lines).
define void @foo() {
; CHECK-LABEL: @foo(
; CHECK-NEXT: [[SUB:%.*]] = add nsw i64 1, 1
; CHECK-NEXT: [[CONV:%.*]] = trunc i64 [[SUB]] to i32
; CHECK-NEXT: unreachable
;
  %sub = add nsw i64 1, 1
  %conv = trunc i64 %sub to i32
  unreachable
}

; Similarly for usubo.

define i1 @bar2() {
; CHECK-LABEL: @bar2(
; CHECK-NEXT: [[CMP:%.*]] = icmp eq i64 1, 0
; CHECK-NEXT: ret i1 [[CMP]]
;
  %cmp = icmp eq i64 1, 0
  ret i1 %cmp
}

; Constant-operand add (canonicalized sub) is left unchanged.
define i64 @foo2(i8 *%p) {
; CHECK-LABEL: @foo2(
; CHECK-NEXT: [[SUB:%.*]] = add nsw i64 1, -1
; CHECK-NEXT: ret i64 [[SUB]]
;
  %sub = add nsw i64 1, -1
  ret i64 %sub
}

; Avoid hoisting a math op into a dominating block which would
; increase the critical path.

define void @PR41129(i64* %p64) {
; CHECK-LABEL: @PR41129(
; CHECK-NEXT: entry:
; CHECK-NEXT: [[KEY:%.*]] = load i64, i64* [[P64:%.*]], align 8
; CHECK-NEXT: [[COND17:%.*]] = icmp eq i64 [[KEY]], 0
; CHECK-NEXT: br i1 [[COND17]], label [[TRUE:%.*]], label [[FALSE:%.*]]
; CHECK: false:
; CHECK-NEXT: [[ANDVAL:%.*]] = and i64 [[KEY]], 7
; CHECK-NEXT: store i64 [[ANDVAL]], i64* [[P64]]
; CHECK-NEXT: br label [[EXIT:%.*]]
; CHECK: true:
; CHECK-NEXT: [[SVALUE:%.*]] = add i64 [[KEY]], -1
; CHECK-NEXT: store i64 [[SVALUE]], i64* [[P64]]
; CHECK-NEXT: br label [[EXIT]]
; CHECK: exit:
; CHECK-NEXT: ret void
;
entry:
  %key = load i64, i64* %p64, align 8
  %cond17 = icmp eq i64 %key, 0
  br i1 %cond17, label %true, label %false

false:
  %andval = and i64 %key, 7
  store i64 %andval, i64* %p64
  br label %exit

true:
  %svalue = add i64 %key, -1
  store i64 %svalue, i64* %p64
  br label %exit

exit:
  ret void
}

; This was crashing when trying to delay instruction removal/deletion.

declare i64 @llvm.objectsize.i64.p0i8(i8*, i1 immarg, i1 immarg, i1 immarg) #0

define hidden fastcc void @crash() {
; CHECK-LABEL: @crash(
; CHECK-NEXT: [[TMP1:%.*]] = call { i64, i1 } @llvm.uadd.with.overflow.i64(i64 undef, i64 undef)
; CHECK-NEXT: [[MATH:%.*]] = extractvalue { i64, i1 } [[TMP1]], 0
; CHECK-NEXT: [[OV:%.*]] = extractvalue { i64, i1 } [[TMP1]], 1
; CHECK-NEXT: [[T2:%.*]] = select i1 undef, i1 undef, i1 [[OV]]
; CHECK-NEXT: unreachable
;
  %t0 = add i64 undef, undef
  %t1 = icmp ult i64 %t0, undef
  %t2 = select i1 undef, i1 undef, i1 %t1
  %t3 = call i64 @llvm.objectsize.i64.p0i8(i8* nonnull undef, i1 false, i1 false, i1 false)
  %t4 = icmp ugt i64 %t3, 7
  unreachable
}

; Check that every instruction inserted by -codegenprepare has a debug location.
; DEBUG: CheckModuleDebugify: PASS