; RUN: opt %s -instcombine -S | FileCheck %s

%overflow.result = type {i8, i1}

declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8)
declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8)
declare double @llvm.powi.f64(double, i32) nounwind readonly
declare i32 @llvm.cttz.i32(i32) nounwind readnone
declare i32 @llvm.ctlz.i32(i32) nounwind readnone
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
declare i8 @llvm.ctlz.i8(i8) nounwind readnone

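; Only the result of uadd.with.overflow is used, so the intrinsic folds to a
; plain add.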
define i8 @test1(i8 %A, i8 %B) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %y = extractvalue %overflow.result %x, 0
  ret i8 %y
; CHECK: @test1
; CHECK-NEXT: %y = add i8 %A, %B
; CHECK-NEXT: ret i8 %y
}

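; Both operands are masked to seven bits, so the add can never overflow: the
; overflow bit folds to false and the add gains an nuw flag.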
define i8 @test2(i8 %A, i8 %B, i1* %overflowPtr) {
  %and.A = and i8 %A, 127
  %and.B = and i8 %B, 127
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @test2
; CHECK-NEXT: %and.A = and i8 %A, 127
; CHECK-NEXT: %and.B = and i8 %B, 127
; CHECK-NEXT: %1 = add nuw i8 %and.A, %and.B
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 %1
}

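; Both operands have the sign bit forced on, so the add always overflows and
; the overflow bit folds to true.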
define i8 @test3(i8 %A, i8 %B, i1* %overflowPtr) {
  %or.A = or i8 %A, -128
  %or.B = or i8 %B, -128
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @test3
; CHECK-NEXT: %or.A = or i8 %A, -128
; CHECK-NEXT: %or.B = or i8 %B, -128
; CHECK-NEXT: %1 = add i8 %or.A, %or.B
; CHECK-NEXT: store i1 true, i1* %overflowPtr
; CHECK-NEXT: ret i8 %1
}

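; Adding undef yields undef, so the whole call (including the store of the
; overflow bit) folds away.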
define i8 @test4(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @test4
; CHECK-NEXT: ret i8 undef
}

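; Multiplying by zero gives zero and can never overflow.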
define i8 @test5(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @test5
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 0
}

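; Multiplying by one gives the other operand and can never overflow.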
define i8 @test6(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 1, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK: @test6
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 %A
}

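; powi with constant exponent -1, 0, or 1 simplifies to a reciprocal, the
; constant 1.0, or the base value, respectively.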
define void @powi(double %V, double *%P) {
entry:
  %A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind
  volatile store double %A, double* %P

  %B = tail call double @llvm.powi.f64(double %V, i32 0) nounwind
  volatile store double %B, double* %P

  %C = tail call double @llvm.powi.f64(double %V, i32 1) nounwind
  volatile store double %C, double* %P
  ret void
; CHECK: @powi
; CHECK: %A = fdiv double 1.0{{.*}}, %V
; CHECK: volatile store double %A,
; CHECK: volatile store double 1.0
; CHECK: volatile store double %V
}

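; Bit 3 is known set and bits 0-2 are known clear, so cttz constant-folds to 3.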
define i32 @cttz(i32 %a) {
entry:
  %or = or i32 %a, 8
  %and = and i32 %or, -8
  %count = tail call i32 @llvm.cttz.i32(i32 %and) nounwind readnone
  ret i32 %count
; CHECK: @cttz
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i32 3
}

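; Bit 5 is known set and bits 6-7 are known clear, so ctlz constant-folds to 2.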
define i8 @ctlz(i8 %a) {
entry:
  %or = or i8 %a, 32
  %and = and i8 %or, 63
  %count = tail call i8 @llvm.ctlz.i8(i8 %and) nounwind readnone
  ret i8 %count
; CHECK: @ctlz
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i8 2
}

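; ctlz/cttz return the bit width, and ctpop returns zero, exactly when the
; input is zero, so each compare simplifies to a compare against zero.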
define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
entry:
  %lz = tail call i32 @llvm.ctlz.i32(i32 %a) nounwind readnone
  %lz.cmp = icmp eq i32 %lz, 32
  volatile store i1 %lz.cmp, i1* %c
  %tz = tail call i32 @llvm.cttz.i32(i32 %a) nounwind readnone
  %tz.cmp = icmp ne i32 %tz, 32
  volatile store i1 %tz.cmp, i1* %c
  %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
  %pop.cmp = icmp eq i32 %pop, 0
  volatile store i1 %pop.cmp, i1* %c
  ret void
; CHECK: @cmp.simplify
; CHECK-NEXT: entry:
; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
; CHECK-NEXT: volatile store i1 %lz.cmp, i1* %c
; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
; CHECK-NEXT: volatile store i1 %tz.cmp, i1* %c
; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
; CHECK-NEXT: volatile store i1 %pop.cmp, i1* %c
}

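; ctlz of an i32 is 32 only when the input is zero, so (ctlz x) >> 5
; simplifies to zext(x == 0). (Despite its name, this test exercises ctlz.)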
define i32 @cttz_simplify1(i32 %x) nounwind readnone ssp {
  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x)    ; <i32> [#uses=1]
  %shr3 = lshr i32 %tmp1, 5                       ; <i32> [#uses=1]
  ret i32 %shr3

; CHECK: @cttz_simplify1
; CHECK: icmp eq i32 %x, 0
; CHECK-NEXT: zext i1
; CHECK-NEXT: ret i32
}
161