; XFAIL: *
; ...should pass. See PR12324: misched bringup
; RUN: llc < %s -march=x86-64 -O3 -asm-verbose=false | FileCheck %s
target datalayout = "e-p:64:64:64"
target triple = "x86_64-unknown-unknown"

; Full strength reduction reduces register pressure from 5 to 4 here.
; Instruction selection should use the FLAGS value from the dec for
; the branch. Scheduling should push the adds upwards.

; CHECK: full_me_0:
; CHECK: movsd (%rsi), %xmm0
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne

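; For reference, a rough C equivalent of the loop below (a hypothetical
; reconstruction; the actual test input is the IR):
;
;   void full_me_0(double *A, double *B, double *C, long n) {
;     for (long i = 0; i < n; ++i)
;       A[i] = B[i] * C[i];
;   }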
define void @full_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; Mostly-full strength reduction means we do full strength reduction on
; everything except the offsets.
;
; Given a choice between constant offsets -2048 and 2048, choose the negative
; value, because at boundary conditions it has a smaller encoding.
; TODO: That's an over-general heuristic. It would be better for the target
; to indicate what the encoding cost would be. Then using a 2048 offset
; would be better on x86-64, since the start value would be 0 instead of
; 2048.

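; Concretely: %j is %i + 256 and a double is 8 bytes, so the %j accesses run
; 256 * 8 = 2048 bytes ahead of the %i accesses. Basing the strength-reduced
; pointers at the %j addresses leaves the %i accesses at offset -2048, as the
; CHECK lines below expect.
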
; CHECK: mostly_full_me_0:
; CHECK: movsd -2048(%rsi), %xmm0
; CHECK: mulsd -2048(%rdx), %xmm0
; CHECK: movsd %xmm0, -2048(%rdi)
; CHECK: movsd (%rsi), %xmm0
; CHECK: divsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne

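; A rough C equivalent of the loop below (hypothetical reconstruction):
;
;   void mostly_full_me_0(double *A, double *B, double *C, long n) {
;     for (long i = 0; i < n; ++i) {
;       A[i] = B[i] * C[i];
;       A[i + 256] = B[i + 256] / C[i + 256];
;     }
;   }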
define void @mostly_full_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %j = add i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; A minor variation on mostly_full_me_0.
; Prefer to start the indvar at 0.

; CHECK: mostly_full_me_1:
; CHECK: movsd (%rsi), %xmm0
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: movsd -2048(%rsi), %xmm0
; CHECK: divsd -2048(%rdx), %xmm0
; CHECK: movsd %xmm0, -2048(%rdi)
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne

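; A rough C equivalent of the loop below (hypothetical reconstruction):
;
;   void mostly_full_me_1(double *A, double *B, double *C, long n) {
;     for (long i = 0; i < n; ++i) {
;       A[i] = B[i] * C[i];
;       A[i - 256] = B[i - 256] / C[i - 256];
;     }
;   }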
define void @mostly_full_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %j = sub i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; A slightly less minor variation on mostly_full_me_0.

; CHECK: mostly_full_me_2:
; CHECK: movsd (%rsi), %xmm0
; CHECK: mulsd (%rdx), %xmm0
; CHECK: movsd %xmm0, (%rdi)
; CHECK: movsd -4096(%rsi), %xmm0
; CHECK: divsd -4096(%rdx), %xmm0
; CHECK: movsd %xmm0, -4096(%rdi)
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: addq $8, %rdi
; CHECK: decq %rcx
; CHECK: jne

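; A rough C equivalent of the loop below (hypothetical reconstruction):
;
;   void mostly_full_me_2(double *A, double *B, double *C, long n) {
;     for (long i = 0; i < n; ++i) {
;       A[i + 256] = B[i + 256] * C[i + 256];
;       A[i - 256] = B[i - 256] / C[i - 256];
;     }
;   }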
define void @mostly_full_me_2(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %k = add i64 %i, 256
  %Ak = getelementptr inbounds double* %A, i64 %k
  %Bk = getelementptr inbounds double* %B, i64 %k
  %Ck = getelementptr inbounds double* %C, i64 %k
  %t1 = load double* %Bk
  %t2 = load double* %Ck
  %m = fmul double %t1, %t2
  store double %m, double* %Ak
  %j = sub i64 %i, 256
  %Aj = getelementptr inbounds double* %A, i64 %j
  %Bj = getelementptr inbounds double* %B, i64 %j
  %Cj = getelementptr inbounds double* %C, i64 %j
  %t3 = load double* %Bj
  %t4 = load double* %Cj
  %o = fdiv double %t3, %t4
  store double %o, double* %Aj
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; In this test, the counting IV exit value is used, so full strength reduction
; would not reduce register pressure. IndVarSimplify ought to simplify such
; cases away, but it's useful here to verify that LSR's register pressure
; heuristics are working as expected.

; CHECK: count_me_0:
; CHECK: movsd (%rsi,%rax,8), %xmm0
; CHECK: mulsd (%rdx,%rax,8), %xmm0
; CHECK: movsd %xmm0, (%rdi,%rax,8)
; CHECK: incq %rax
; CHECK: cmpq %rax, %rcx
; CHECK: jne

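; A rough C equivalent of the function below (hypothetical reconstruction),
; showing the counting IV's exit value escaping the loop:
;
;   long count_me_0(double *A, double *B, double *C, long n) {
;     long i = 0;
;     for (; i < n; ++i)
;       A[i] = B[i] * C[i];
;     return i; /* the IV's exit value: 0 if the loop is skipped, else n */
;   }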
define i64 @count_me_0(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  %q = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  ret i64 %q
}

; In this test, the trip count value is used, so full strength reduction
; would not reduce register pressure.
; (though it would reduce register pressure inside the loop...)

; CHECK: count_me_1:
; CHECK: movsd (%rsi,%rax,8), %xmm0
; CHECK: mulsd (%rdx,%rax,8), %xmm0
; CHECK: movsd %xmm0, (%rdi,%rax,8)
; CHECK: incq %rax
; CHECK: cmpq %rax, %rcx
; CHECK: jne

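; A rough C equivalent of the function below (hypothetical reconstruction),
; showing the trip count kept live across the loop:
;
;   long count_me_1(double *A, double *B, double *C, long n) {
;     for (long i = 0; i < n; ++i)
;       A[i] = B[i] * C[i];
;     return n > 0 ? n : 0;
;   }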
define i64 @count_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  %q = phi i64 [ 0, %entry ], [ %n, %loop ]
  ret i64 %q
}

; Full strength reduction doesn't save any registers here because the
; loop trip count is a constant.

; CHECK: count_me_2:
; CHECK: movl $10, %eax
; CHECK: align
; CHECK: BB6_1:
; CHECK: movsd -40(%rdi,%rax,8), %xmm0
; CHECK: addsd -40(%rsi,%rax,8), %xmm0
; CHECK: movsd %xmm0, -40(%rdx,%rax,8)
; CHECK: movsd (%rdi,%rax,8), %xmm0
; CHECK: subsd (%rsi,%rax,8), %xmm0
; CHECK: movsd %xmm0, (%rdx,%rax,8)
; CHECK: incq %rax
; CHECK: cmpq $5010, %rax
; CHECK: jne

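; A rough C equivalent of the loop below (hypothetical reconstruction):
;
;   void count_me_2(double *A, double *B, double *C) {
;     for (long i = 0; i < 5000; ++i) {
;       C[i + 5] = A[i + 5] + B[i + 5];
;       C[i + 10] = A[i + 10] - B[i + 10];
;     }
;   }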
define void @count_me_2(double* nocapture %A, double* nocapture %B, double* nocapture %C) nounwind {
entry:
  br label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %i5 = add i64 %i, 5
  %Ai = getelementptr double* %A, i64 %i5
  %t2 = load double* %Ai
  %Bi = getelementptr double* %B, i64 %i5
  %t4 = load double* %Bi
  %t5 = fadd double %t2, %t4
  %Ci = getelementptr double* %C, i64 %i5
  store double %t5, double* %Ci
  %i10 = add i64 %i, 10
  %Ai10 = getelementptr double* %A, i64 %i10
  %t9 = load double* %Ai10
  %Bi10 = getelementptr double* %B, i64 %i10
  %t11 = load double* %Bi10
  %t12 = fsub double %t9, %t11
  %Ci10 = getelementptr double* %C, i64 %i10
  store double %t12, double* %Ci10
  %i.next = add i64 %i, 1
  %exitcond = icmp eq i64 %i.next, 5000
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; This should be fully strength-reduced to reduce register pressure.

; CHECK: full_me_1:
; CHECK: align
; CHECK: BB7_1:
; CHECK: movsd (%rdi), %xmm0
; CHECK: addsd (%rsi), %xmm0
; CHECK: movsd %xmm0, (%rdx)
; CHECK: movsd 40(%rdi), %xmm0
; CHECK: subsd 40(%rsi), %xmm0
; CHECK: movsd %xmm0, 40(%rdx)
; CHECK: addq $8, %rdi
; CHECK: addq $8, %rsi
; CHECK: addq $8, %rdx
; CHECK: decq %rcx
; CHECK: jne

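; A rough C equivalent of the loop below (hypothetical reconstruction; the
; IR has no n > 0 guard, so this matches only for n >= 1):
;
;   void full_me_1(double *A, double *B, double *C, long n) {
;     long i = 0;
;     do {
;       C[i + 5] = A[i + 5] + B[i + 5];
;       C[i + 10] = A[i + 10] - B[i + 10];
;     } while (++i != n);
;   }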
define void @full_me_1(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  br label %loop

loop:
  %i = phi i64 [ 0, %entry ], [ %i.next, %loop ]
  %i5 = add i64 %i, 5
  %Ai = getelementptr double* %A, i64 %i5
  %t2 = load double* %Ai
  %Bi = getelementptr double* %B, i64 %i5
  %t4 = load double* %Bi
  %t5 = fadd double %t2, %t4
  %Ci = getelementptr double* %C, i64 %i5
  store double %t5, double* %Ci
  %i10 = add i64 %i, 10
  %Ai10 = getelementptr double* %A, i64 %i10
  %t9 = load double* %Ai10
  %Bi10 = getelementptr double* %B, i64 %i10
  %t11 = load double* %Bi10
  %t12 = fsub double %t9, %t11
  %Ci10 = getelementptr double* %C, i64 %i10
  store double %t12, double* %Ci10
  %i.next = add i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; This is a variation on full_me_0 in which the 0,+,1 induction variable
; has a non-address use, pinning that value in a register.

; CHECK: count_me_3:
; CHECK: call
; CHECK: movsd (%r{{[^,]*}},%r{{[^,]*}},8), %xmm0
; CHECK: mulsd (%r{{[^,]*}},%r{{[^,]*}},8), %xmm0
; CHECK: movsd %xmm0, (%r{{[^,]*}},%r{{[^,]*}},8)
; CHECK: incq %r{{.*}}
; CHECK: cmpq %r{{.*}}, %r{{.*}}
; CHECK: jne

declare void @use(i64)

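; A rough C equivalent of the loop below (hypothetical reconstruction):
;
;   void count_me_3(double *A, double *B, double *C, long n) {
;     for (long i = 0; i < n; ++i) {
;       use(i); /* the non-address use that pins %i in a register */
;       A[i] = B[i] * C[i];
;     }
;   }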
define void @count_me_3(double* nocapture %A, double* nocapture %B, double* nocapture %C, i64 %n) nounwind {
entry:
  %t0 = icmp sgt i64 %n, 0
  br i1 %t0, label %loop, label %return

loop:
  %i = phi i64 [ %i.next, %loop ], [ 0, %entry ]
  call void @use(i64 %i)
  %Ai = getelementptr inbounds double* %A, i64 %i
  %Bi = getelementptr inbounds double* %B, i64 %i
  %Ci = getelementptr inbounds double* %C, i64 %i
  %t1 = load double* %Bi
  %t2 = load double* %Ci
  %m = fmul double %t1, %t2
  store double %m, double* %Ai
  %i.next = add nsw i64 %i, 1
  %exitcond = icmp eq i64 %i.next, %n
  br i1 %exitcond, label %return, label %loop

return:
  ret void
}

; LSR should use only one indvar for the inner loop.
; rdar://7657764

; CHECK: asd:
; CHECK: BB9_4:
; CHECK-NEXT: addl (%r{{[^,]*}},%rdi,4), %e
; CHECK-NEXT: incq %rdi
; CHECK-NEXT: cmpq %rdi, %r{{[^,]*}}
; CHECK-NEXT: jg

%struct.anon = type { i32, [4200 x i32] }

@bars = common global [123123 x %struct.anon] zeroinitializer, align 32 ; <[123123 x %struct.anon]*> [#uses=2]

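; A rough C equivalent of @asd (hypothetical reconstruction; the struct
; field names are invented):
;
;   struct anon { int count; int elems[4200]; } bars[123123];
;   int asd(int n) {
;     int s = 0;
;     for (long i = 0; i < n; ++i)
;       for (long j = 0; j < bars[i].count; ++j)
;         s += bars[i].elems[j];
;     return s;
;   }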
define i32 @asd(i32 %n) nounwind readonly {
entry:
  %0 = icmp sgt i32 %n, 0 ; <i1> [#uses=1]
  br i1 %0, label %bb.nph14, label %bb5

bb.nph14: ; preds = %entry
  %tmp18 = zext i32 %n to i64 ; <i64> [#uses=1]
  br label %bb

bb: ; preds = %bb3, %bb.nph14
  %indvar16 = phi i64 [ 0, %bb.nph14 ], [ %indvar.next17, %bb3 ] ; <i64> [#uses=3]
  %s.113 = phi i32 [ 0, %bb.nph14 ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=2]
  %scevgep2526 = getelementptr [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 0 ; <i32*> [#uses=1]
  %1 = load i32* %scevgep2526, align 4 ; <i32> [#uses=2]
  %2 = icmp sgt i32 %1, 0 ; <i1> [#uses=1]
  br i1 %2, label %bb.nph, label %bb3

bb.nph: ; preds = %bb
  %tmp23 = sext i32 %1 to i64 ; <i64> [#uses=1]
  br label %bb1

bb1: ; preds = %bb.nph, %bb1
  %indvar = phi i64 [ 0, %bb.nph ], [ %tmp19, %bb1 ] ; <i64> [#uses=2]
  %s.07 = phi i32 [ %s.113, %bb.nph ], [ %4, %bb1 ] ; <i32> [#uses=1]
  %c.08 = getelementptr [123123 x %struct.anon]* @bars, i64 0, i64 %indvar16, i32 1, i64 %indvar ; <i32*> [#uses=1]
  %3 = load i32* %c.08, align 4 ; <i32> [#uses=1]
  %4 = add nsw i32 %3, %s.07 ; <i32> [#uses=2]
  %tmp19 = add i64 %indvar, 1 ; <i64> [#uses=2]
  %5 = icmp sgt i64 %tmp23, %tmp19 ; <i1> [#uses=1]
  br i1 %5, label %bb1, label %bb3

bb3: ; preds = %bb1, %bb
  %s.0.lcssa = phi i32 [ %s.113, %bb ], [ %4, %bb1 ] ; <i32> [#uses=2]
  %indvar.next17 = add i64 %indvar16, 1 ; <i64> [#uses=2]
  %exitcond = icmp eq i64 %indvar.next17, %tmp18 ; <i1> [#uses=1]
  br i1 %exitcond, label %bb5, label %bb

bb5: ; preds = %bb3, %entry
  %s.1.lcssa = phi i32 [ 0, %entry ], [ %s.0.lcssa, %bb3 ] ; <i32> [#uses=1]
  ret i32 %s.1.lcssa
}

; Two loops here are of particular interest: the one at %bb21, where we don't
; want to leave extra induction variables around or use an lea to compute an
; exit condition inside the loop:

; CHECK: test:

; CHECK: BB10_4:
; CHECK-NEXT: movaps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: addss %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: mulss (%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT: movss %xmm{{.*}}, (%r{{[^,]*}})
; CHECK-NEXT: addq $4, %r{{.*}}
; CHECK-NEXT: decq %r{{.*}}
; CHECK-NEXT: addq $4, %r{{.*}}
; CHECK-NEXT: movaps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: BB10_2:
; CHECK-NEXT: testq %r{{.*}}, %r{{.*}}
; CHECK-NEXT: jle
; CHECK-NEXT: testb $15, %r{{.*}}
; CHECK-NEXT: jne

; And the one at %bb68, where we want to be sure to use superhero mode:

; CHECK: BB10_7:
; CHECK-NEXT: movaps 48(%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT: mulps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: movaps 32(%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT: mulps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: movaps 16(%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT: mulps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: movaps (%r{{[^,]*}}), %xmm{{.*}}
; CHECK-NEXT: mulps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: movaps %xmm{{.*}}, (%r{{[^,]*}})
; CHECK-NEXT: movaps %xmm{{.*}}, 16(%r{{[^,]*}})
; CHECK-NEXT: movaps %xmm{{.*}}, 32(%r{{[^,]*}})
; CHECK-NEXT: movaps %xmm{{.*}}, 48(%r{{[^,]*}})
; CHECK-NEXT: addps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: addps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: addps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: addps %xmm{{.*}}, %xmm{{.*}}
; CHECK-NEXT: addq $64, %r{{.*}}
; CHECK-NEXT: addq $64, %r{{.*}}
; CHECK-NEXT: addq $-16, %r{{.*}}
; CHECK-NEXT: cmpq $15, %r{{.*}}
; CHECK-NEXT: jg

define void @test(float* %arg, i64 %arg1, float* nocapture %arg2, float* nocapture %arg3, float* %arg4, i64 %arg5, i64 %arg6) nounwind {
bb:
  %t = alloca float, align 4 ; <float*> [#uses=3]
  %t7 = alloca float, align 4 ; <float*> [#uses=2]
  %t8 = load float* %arg3 ; <float> [#uses=8]
  %t9 = ptrtoint float* %arg to i64 ; <i64> [#uses=1]
  %t10 = ptrtoint float* %arg4 to i64 ; <i64> [#uses=1]
  %t11 = xor i64 %t10, %t9 ; <i64> [#uses=1]
  %t12 = and i64 %t11, 15 ; <i64> [#uses=1]
  %t13 = icmp eq i64 %t12, 0 ; <i1> [#uses=1]
  %t14 = xor i64 %arg1, 1 ; <i64> [#uses=1]
  %t15 = xor i64 %arg5, 1 ; <i64> [#uses=1]
  %t16 = or i64 %t15, %t14 ; <i64> [#uses=1]
  %t17 = trunc i64 %t16 to i32 ; <i32> [#uses=1]
  %t18 = icmp eq i32 %t17, 0 ; <i1> [#uses=1]
  br i1 %t18, label %bb19, label %bb213

bb19: ; preds = %bb
  %t20 = load float* %arg2 ; <float> [#uses=1]
  br label %bb21

bb21: ; preds = %bb32, %bb19
  %t22 = phi i64 [ %t36, %bb32 ], [ 0, %bb19 ] ; <i64> [#uses=21]
  %t23 = phi float [ %t35, %bb32 ], [ %t20, %bb19 ] ; <float> [#uses=6]
  %t24 = sub i64 %arg6, %t22 ; <i64> [#uses=4]
  %t25 = getelementptr float* %arg4, i64 %t22 ; <float*> [#uses=4]
  %t26 = getelementptr float* %arg, i64 %t22 ; <float*> [#uses=3]
  %t27 = icmp sgt i64 %t24, 0 ; <i1> [#uses=1]
  br i1 %t27, label %bb28, label %bb37

bb28: ; preds = %bb21
  %t29 = ptrtoint float* %t25 to i64 ; <i64> [#uses=1]
  %t30 = and i64 %t29, 15 ; <i64> [#uses=1]
  %t31 = icmp eq i64 %t30, 0 ; <i1> [#uses=1]
  br i1 %t31, label %bb37, label %bb32

bb32: ; preds = %bb28
  %t33 = load float* %t26 ; <float> [#uses=1]
  %t34 = fmul float %t23, %t33 ; <float> [#uses=1]
  store float %t34, float* %t25
  %t35 = fadd float %t23, %t8 ; <float> [#uses=1]
  %t36 = add i64 %t22, 1 ; <i64> [#uses=1]
  br label %bb21

bb37: ; preds = %bb28, %bb21
  %t38 = fmul float %t8, 4.000000e+00 ; <float> [#uses=1]
  store float %t38, float* %t
  %t39 = fmul float %t8, 1.600000e+01 ; <float> [#uses=1]
  store float %t39, float* %t7
  %t40 = fmul float %t8, 0.000000e+00 ; <float> [#uses=1]
  %t41 = fadd float %t23, %t40 ; <float> [#uses=1]
  %t42 = insertelement <4 x float> undef, float %t41, i32 0 ; <<4 x float>> [#uses=1]
  %t43 = fadd float %t23, %t8 ; <float> [#uses=1]
  %t44 = insertelement <4 x float> %t42, float %t43, i32 1 ; <<4 x float>> [#uses=1]
  %t45 = fmul float %t8, 2.000000e+00 ; <float> [#uses=1]
  %t46 = fadd float %t23, %t45 ; <float> [#uses=1]
  %t47 = insertelement <4 x float> %t44, float %t46, i32 2 ; <<4 x float>> [#uses=1]
  %t48 = fmul float %t8, 3.000000e+00 ; <float> [#uses=1]
  %t49 = fadd float %t23, %t48 ; <float> [#uses=1]
  %t50 = insertelement <4 x float> %t47, float %t49, i32 3 ; <<4 x float>> [#uses=5]
  %t51 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %t) nounwind ; <<4 x float>> [#uses=3]
  %t52 = fadd <4 x float> %t50, %t51 ; <<4 x float>> [#uses=3]
  %t53 = fadd <4 x float> %t52, %t51 ; <<4 x float>> [#uses=3]
  %t54 = fadd <4 x float> %t53, %t51 ; <<4 x float>> [#uses=2]
  %t55 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %t7) nounwind ; <<4 x float>> [#uses=8]
  %t56 = icmp sgt i64 %t24, 15 ; <i1> [#uses=2]
  br i1 %t13, label %bb57, label %bb118

bb57: ; preds = %bb37
  br i1 %t56, label %bb61, label %bb112

bb58: ; preds = %bb68
  %t59 = getelementptr float* %arg, i64 %t78 ; <float*> [#uses=1]
  %t60 = getelementptr float* %arg4, i64 %t78 ; <float*> [#uses=1]
  br label %bb112

bb61: ; preds = %bb57
  %t62 = add i64 %t22, 16 ; <i64> [#uses=1]
  %t63 = add i64 %t22, 4 ; <i64> [#uses=1]
  %t64 = add i64 %t22, 8 ; <i64> [#uses=1]
  %t65 = add i64 %t22, 12 ; <i64> [#uses=1]
  %t66 = add i64 %arg6, -16 ; <i64> [#uses=1]
  %t67 = sub i64 %t66, %t22 ; <i64> [#uses=1]
  br label %bb68

bb68: ; preds = %bb68, %bb61
  %t69 = phi i64 [ 0, %bb61 ], [ %t111, %bb68 ] ; <i64> [#uses=3]
  %t70 = phi <4 x float> [ %t54, %bb61 ], [ %t107, %bb68 ] ; <<4 x float>> [#uses=2]
  %t71 = phi <4 x float> [ %t50, %bb61 ], [ %t103, %bb68 ] ; <<4 x float>> [#uses=2]
  %t72 = phi <4 x float> [ %t53, %bb61 ], [ %t108, %bb68 ] ; <<4 x float>> [#uses=2]
  %t73 = phi <4 x float> [ %t52, %bb61 ], [ %t109, %bb68 ] ; <<4 x float>> [#uses=2]
  %t74 = shl i64 %t69, 4 ; <i64> [#uses=5]
  %t75 = add i64 %t22, %t74 ; <i64> [#uses=2]
  %t76 = getelementptr float* %arg, i64 %t75 ; <float*> [#uses=1]
  %t77 = bitcast float* %t76 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t78 = add i64 %t62, %t74 ; <i64> [#uses=2]
  %t79 = add i64 %t63, %t74 ; <i64> [#uses=2]
  %t80 = getelementptr float* %arg, i64 %t79 ; <float*> [#uses=1]
  %t81 = bitcast float* %t80 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t82 = add i64 %t64, %t74 ; <i64> [#uses=2]
  %t83 = getelementptr float* %arg, i64 %t82 ; <float*> [#uses=1]
  %t84 = bitcast float* %t83 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t85 = add i64 %t65, %t74 ; <i64> [#uses=2]
  %t86 = getelementptr float* %arg, i64 %t85 ; <float*> [#uses=1]
  %t87 = bitcast float* %t86 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t88 = getelementptr float* %arg4, i64 %t75 ; <float*> [#uses=1]
  %t89 = bitcast float* %t88 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t90 = getelementptr float* %arg4, i64 %t79 ; <float*> [#uses=1]
  %t91 = bitcast float* %t90 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t92 = getelementptr float* %arg4, i64 %t82 ; <float*> [#uses=1]
  %t93 = bitcast float* %t92 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t94 = getelementptr float* %arg4, i64 %t85 ; <float*> [#uses=1]
  %t95 = bitcast float* %t94 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t96 = mul i64 %t69, -16 ; <i64> [#uses=1]
  %t97 = add i64 %t67, %t96 ; <i64> [#uses=2]
  %t98 = load <4 x float>* %t77 ; <<4 x float>> [#uses=1]
  %t99 = load <4 x float>* %t81 ; <<4 x float>> [#uses=1]
  %t100 = load <4 x float>* %t84 ; <<4 x float>> [#uses=1]
  %t101 = load <4 x float>* %t87 ; <<4 x float>> [#uses=1]
  %t102 = fmul <4 x float> %t98, %t71 ; <<4 x float>> [#uses=1]
  %t103 = fadd <4 x float> %t71, %t55 ; <<4 x float>> [#uses=2]
  %t104 = fmul <4 x float> %t99, %t73 ; <<4 x float>> [#uses=1]
  %t105 = fmul <4 x float> %t100, %t72 ; <<4 x float>> [#uses=1]
  %t106 = fmul <4 x float> %t101, %t70 ; <<4 x float>> [#uses=1]
  store <4 x float> %t102, <4 x float>* %t89
  store <4 x float> %t104, <4 x float>* %t91
  store <4 x float> %t105, <4 x float>* %t93
  store <4 x float> %t106, <4 x float>* %t95
  %t107 = fadd <4 x float> %t70, %t55 ; <<4 x float>> [#uses=1]
  %t108 = fadd <4 x float> %t72, %t55 ; <<4 x float>> [#uses=1]
  %t109 = fadd <4 x float> %t73, %t55 ; <<4 x float>> [#uses=1]
  %t110 = icmp sgt i64 %t97, 15 ; <i1> [#uses=1]
  %t111 = add i64 %t69, 1 ; <i64> [#uses=1]
  br i1 %t110, label %bb68, label %bb58

bb112: ; preds = %bb58, %bb57
  %t113 = phi float* [ %t59, %bb58 ], [ %t26, %bb57 ] ; <float*> [#uses=1]
  %t114 = phi float* [ %t60, %bb58 ], [ %t25, %bb57 ] ; <float*> [#uses=1]
  %t115 = phi <4 x float> [ %t103, %bb58 ], [ %t50, %bb57 ] ; <<4 x float>> [#uses=1]
  %t116 = phi i64 [ %t97, %bb58 ], [ %t24, %bb57 ] ; <i64> [#uses=1]
  %t117 = call <4 x float> asm "movss $1, $0\09\0Apshufd $$0, $0, $0", "=x,*m,~{dirflag},~{fpsr},~{flags}"(float* %t) nounwind ; <<4 x float>> [#uses=0]
  br label %bb194

bb118: ; preds = %bb37
  br i1 %t56, label %bb122, label %bb194

bb119: ; preds = %bb137
  %t120 = getelementptr float* %arg, i64 %t145 ; <float*> [#uses=1]
  %t121 = getelementptr float* %arg4, i64 %t145 ; <float*> [#uses=1]
  br label %bb194

bb122: ; preds = %bb118
  %t123 = add i64 %t22, -1 ; <i64> [#uses=1]
  %t124 = getelementptr inbounds float* %arg, i64 %t123 ; <float*> [#uses=1]
  %t125 = bitcast float* %t124 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t126 = load <4 x float>* %t125 ; <<4 x float>> [#uses=1]
  %t127 = add i64 %t22, 16 ; <i64> [#uses=1]
  %t128 = add i64 %t22, 3 ; <i64> [#uses=1]
  %t129 = add i64 %t22, 7 ; <i64> [#uses=1]
  %t130 = add i64 %t22, 11 ; <i64> [#uses=1]
  %t131 = add i64 %t22, 15 ; <i64> [#uses=1]
  %t132 = add i64 %t22, 4 ; <i64> [#uses=1]
  %t133 = add i64 %t22, 8 ; <i64> [#uses=1]
  %t134 = add i64 %t22, 12 ; <i64> [#uses=1]
  %t135 = add i64 %arg6, -16 ; <i64> [#uses=1]
  %t136 = sub i64 %t135, %t22 ; <i64> [#uses=1]
  br label %bb137

bb137: ; preds = %bb137, %bb122
  %t138 = phi i64 [ 0, %bb122 ], [ %t193, %bb137 ] ; <i64> [#uses=3]
  %t139 = phi <4 x float> [ %t54, %bb122 ], [ %t189, %bb137 ] ; <<4 x float>> [#uses=2]
  %t140 = phi <4 x float> [ %t50, %bb122 ], [ %t185, %bb137 ] ; <<4 x float>> [#uses=2]
  %t141 = phi <4 x float> [ %t53, %bb122 ], [ %t190, %bb137 ] ; <<4 x float>> [#uses=2]
  %t142 = phi <4 x float> [ %t52, %bb122 ], [ %t191, %bb137 ] ; <<4 x float>> [#uses=2]
  %t143 = phi <4 x float> [ %t126, %bb122 ], [ %t175, %bb137 ] ; <<4 x float>> [#uses=1]
  %t144 = shl i64 %t138, 4 ; <i64> [#uses=9]
  %t145 = add i64 %t127, %t144 ; <i64> [#uses=2]
  %t146 = add i64 %t128, %t144 ; <i64> [#uses=1]
  %t147 = getelementptr float* %arg, i64 %t146 ; <float*> [#uses=1]
  %t148 = bitcast float* %t147 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t149 = add i64 %t129, %t144 ; <i64> [#uses=1]
  %t150 = getelementptr float* %arg, i64 %t149 ; <float*> [#uses=1]
  %t151 = bitcast float* %t150 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t152 = add i64 %t130, %t144 ; <i64> [#uses=1]
  %t153 = getelementptr float* %arg, i64 %t152 ; <float*> [#uses=1]
  %t154 = bitcast float* %t153 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t155 = add i64 %t131, %t144 ; <i64> [#uses=1]
  %t156 = getelementptr float* %arg, i64 %t155 ; <float*> [#uses=1]
  %t157 = bitcast float* %t156 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t158 = add i64 %t22, %t144 ; <i64> [#uses=1]
  %t159 = getelementptr float* %arg4, i64 %t158 ; <float*> [#uses=1]
  %t160 = bitcast float* %t159 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t161 = add i64 %t132, %t144 ; <i64> [#uses=1]
  %t162 = getelementptr float* %arg4, i64 %t161 ; <float*> [#uses=1]
  %t163 = bitcast float* %t162 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t164 = add i64 %t133, %t144 ; <i64> [#uses=1]
  %t165 = getelementptr float* %arg4, i64 %t164 ; <float*> [#uses=1]
  %t166 = bitcast float* %t165 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t167 = add i64 %t134, %t144 ; <i64> [#uses=1]
  %t168 = getelementptr float* %arg4, i64 %t167 ; <float*> [#uses=1]
  %t169 = bitcast float* %t168 to <4 x float>* ; <<4 x float>*> [#uses=1]
  %t170 = mul i64 %t138, -16 ; <i64> [#uses=1]
  %t171 = add i64 %t136, %t170 ; <i64> [#uses=2]
  %t172 = load <4 x float>* %t148 ; <<4 x float>> [#uses=2]
  %t173 = load <4 x float>* %t151 ; <<4 x float>> [#uses=2]
  %t174 = load <4 x float>* %t154 ; <<4 x float>> [#uses=2]
  %t175 = load <4 x float>* %t157 ; <<4 x float>> [#uses=2]
  %t176 = shufflevector <4 x float> %t143, <4 x float> %t172, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %t177 = shufflevector <4 x float> %t176, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
  %t178 = shufflevector <4 x float> %t172, <4 x float> %t173, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %t179 = shufflevector <4 x float> %t178, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
  %t180 = shufflevector <4 x float> %t173, <4 x float> %t174, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %t181 = shufflevector <4 x float> %t180, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
  %t182 = shufflevector <4 x float> %t174, <4 x float> %t175, <4 x i32> <i32 4, i32 1, i32 2, i32 3> ; <<4 x float>> [#uses=1]
  %t183 = shufflevector <4 x float> %t182, <4 x float> undef, <4 x i32> <i32 1, i32 2, i32 3, i32 0> ; <<4 x float>> [#uses=1]
  %t184 = fmul <4 x float> %t177, %t140 ; <<4 x float>> [#uses=1]
  %t185 = fadd <4 x float> %t140, %t55 ; <<4 x float>> [#uses=2]
  %t186 = fmul <4 x float> %t179, %t142 ; <<4 x float>> [#uses=1]
  %t187 = fmul <4 x float> %t181, %t141 ; <<4 x float>> [#uses=1]
  %t188 = fmul <4 x float> %t183, %t139 ; <<4 x float>> [#uses=1]
  store <4 x float> %t184, <4 x float>* %t160
  store <4 x float> %t186, <4 x float>* %t163
  store <4 x float> %t187, <4 x float>* %t166
  store <4 x float> %t188, <4 x float>* %t169
  %t189 = fadd <4 x float> %t139, %t55 ; <<4 x float>> [#uses=1]
  %t190 = fadd <4 x float> %t141, %t55 ; <<4 x float>> [#uses=1]
  %t191 = fadd <4 x float> %t142, %t55 ; <<4 x float>> [#uses=1]
  %t192 = icmp sgt i64 %t171, 15 ; <i1> [#uses=1]
  %t193 = add i64 %t138, 1 ; <i64> [#uses=1]
  br i1 %t192, label %bb137, label %bb119

bb194: ; preds = %bb119, %bb118, %bb112
  %t195 = phi i64 [ %t116, %bb112 ], [ %t171, %bb119 ], [ %t24, %bb118 ] ; <i64> [#uses=2]
  %t196 = phi <4 x float> [ %t115, %bb112 ], [ %t185, %bb119 ], [ %t50, %bb118 ] ; <<4 x float>> [#uses=1]
  %t197 = phi float* [ %t114, %bb112 ], [ %t121, %bb119 ], [ %t25, %bb118 ] ; <float*> [#uses=1]
  %t198 = phi float* [ %t113, %bb112 ], [ %t120, %bb119 ], [ %t26, %bb118 ] ; <float*> [#uses=1]
  %t199 = extractelement <4 x float> %t196, i32 0 ; <float> [#uses=2]
  %t200 = icmp sgt i64 %t195, 0 ; <i1> [#uses=1]
  br i1 %t200, label %bb201, label %bb211

bb201: ; preds = %bb201, %bb194
  %t202 = phi i64 [ %t209, %bb201 ], [ 0, %bb194 ] ; <i64> [#uses=3]
  %t203 = phi float [ %t208, %bb201 ], [ %t199, %bb194 ] ; <float> [#uses=2]
  %t204 = getelementptr float* %t198, i64 %t202 ; <float*> [#uses=1]
  %t205 = getelementptr float* %t197, i64 %t202 ; <float*> [#uses=1]
  %t206 = load float* %t204 ; <float> [#uses=1]
  %t207 = fmul float %t203, %t206 ; <float> [#uses=1]
  store float %t207, float* %t205
  %t208 = fadd float %t203, %t8 ; <float> [#uses=2]
  %t209 = add i64 %t202, 1 ; <i64> [#uses=2]
  %t210 = icmp eq i64 %t209, %t195 ; <i1> [#uses=1]
  br i1 %t210, label %bb211, label %bb201

bb211: ; preds = %bb201, %bb194
  %t212 = phi float [ %t199, %bb194 ], [ %t208, %bb201 ] ; <float> [#uses=1]
  store float %t212, float* %arg2
  ret void

bb213: ; preds = %bb
  ret void
}