; RUN: llc < %s -march=x86-64 > %t
; RUN: not grep and %t
; RUN: not grep movz %t
; RUN: not grep sar %t
; RUN: not grep shl %t
; RUN: grep add %t | count 2
; RUN: grep inc %t | count 3
; RUN: grep dec %t | count 2
; RUN: grep lea %t | count 3
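; The "not grep" lines check that the zext-inreg (and, movz) and sext-inreg
; (shl, sar) operations on the induction variables are eliminated from the
; generated code; the counted greps check the expected mix of add/inc/dec/lea
; instructions that remain.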

; Optimize away zext-inreg and sext-inreg on the loop induction
; variable using trip-count information.
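;
; For example, in @count_up below the trip count is 10, so %indvar is always
; in [0, 9]; a mask such as
;   %indvar.i8 = and i64 %indvar, 255
; always yields %indvar itself, and the shl/ashr (sext-inreg) pairs in the
; signed variants are likewise no-ops, so all of them can be removed.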

define void @count_up(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
  %indvar.i8 = and i64 %indvar, 255
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %indvar.i24 = and i64 %indvar, 16777215
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 10
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

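; Same as @count_up, but counting down from 10; %indvar stays in [1, 10], so
; the masks are still no-ops.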
define void @count_down(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
  %indvar.i8 = and i64 %indvar, 255
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %indvar.i24 = and i64 %indvar, 16777215
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = sub i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 0
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

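; Signed variant of @count_up: the shl/ashr pairs are sext-inreg idioms on the
; induction variable, removable for the same trip-count reason.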
define void @count_up_signed(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
  %s0 = shl i64 %indvar, 8
  %indvar.i8 = ashr i64 %s0, 8
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %s1 = shl i64 %indvar, 24
  %indvar.i24 = ashr i64 %s1, 24
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 10
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

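; Signed variant of @count_down.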
define void @count_down_signed(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 10, %entry ], [ %indvar.next, %loop ]
  %s0 = shl i64 %indvar, 8
  %indvar.i8 = ashr i64 %s0, 8
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %s1 = shl i64 %indvar, 24
  %indvar.i24 = ashr i64 %s1, 24
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = sub i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 0
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; TODO: If we could handle all the loads and stores as post-inc users, we could
; use {-1,+,1} in the induction variable register, and we'd get another inc,
; one fewer add, and a comparison with zero.
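; ({start,+,step} is SCEV add-recurrence notation, so {-1,+,1} is an induction
; variable that starts at -1 and is incremented by 1 on each iteration.)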
define void @another_count_up(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 18446744073709551615, %entry ], [ %indvar.next, %loop ]
  %indvar.i8 = and i64 %indvar, 255
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %indvar.i24 = and i64 %indvar, 16777215
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 0
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

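; Like @another_count_up, but the induction variable starts at 0 and counts
; down; the loop exits when %indvar.next wraps around to -1
; (18446744073709551615).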
define void @another_count_down(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
  %indvar.i8 = and i64 %indvar, 255
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %indvar.i24 = and i64 %indvar, 16777215
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = sub i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 18446744073709551615
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

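; Signed (shl/ashr) variant of @another_count_up.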
define void @another_count_up_signed(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 18446744073709551615, %entry ], [ %indvar.next, %loop ]
  %s0 = shl i64 %indvar, 8
  %indvar.i8 = ashr i64 %s0, 8
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %s1 = shl i64 %indvar, 24
  %indvar.i24 = ashr i64 %s1, 24
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = add i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 0
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

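; Signed (shl/ashr) variant of @another_count_down.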
define void @another_count_down_signed(double* %d, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %indvar = phi i64 [ 0, %entry ], [ %indvar.next, %loop ]
  %s0 = shl i64 %indvar, 8
  %indvar.i8 = ashr i64 %s0, 8
  %t0 = getelementptr double* %d, i64 %indvar.i8
  %t1 = load double* %t0
  %t2 = fmul double %t1, 0.1
  store double %t2, double* %t0
  %s1 = shl i64 %indvar, 24
  %indvar.i24 = ashr i64 %s1, 24
  %t3 = getelementptr double* %d, i64 %indvar.i24
  %t4 = load double* %t3
  %t5 = fmul double %t4, 2.3
  store double %t5, double* %t3
  %t6 = getelementptr double* %d, i64 %indvar
  %t7 = load double* %t6
  %t8 = fmul double %t7, 4.5
  store double %t8, double* %t6
  %indvar.next = sub i64 %indvar, 1
  %exitcond = icmp eq i64 %indvar.next, 18446744073709551615
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}