; RUN: llc -mtriple=i686-linux -enable-block-placement < %s | FileCheck %s
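; This file exercises the machine block placement pass: the bare '; CHECK: %label'
; lines below match the basic-block source-label comments that llc emits into the
; assembly, so the order of the CHECK lines pins down the expected block layout.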

declare void @error(i32 %i, i32 %a, i32 %b)

define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
; Test a chain of ifs, where the block guarded by the if is error handling code
; that is not expected to run.
; CHECK: test_ifchains:
; CHECK: %entry
; CHECK: %else1
; CHECK: %else2
; CHECK: %else3
; CHECK: %else4
; CHECK: %exit
; CHECK: %then1
; CHECK: %then2
; CHECK: %then3
; CHECK: %then4
; CHECK: %then5

entry:
  %gep1 = getelementptr i32* %a, i32 1
  %val1 = load i32* %gep1
  %cond1 = icmp ugt i32 %val1, 1
  br i1 %cond1, label %then1, label %else1, !prof !0

then1:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %else1

else1:
  %gep2 = getelementptr i32* %a, i32 2
  %val2 = load i32* %gep2
  %cond2 = icmp ugt i32 %val2, 2
  br i1 %cond2, label %then2, label %else2, !prof !0

then2:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %else2

else2:
  %gep3 = getelementptr i32* %a, i32 3
  %val3 = load i32* %gep3
  %cond3 = icmp ugt i32 %val3, 3
  br i1 %cond3, label %then3, label %else3, !prof !0

then3:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %else3

else3:
  %gep4 = getelementptr i32* %a, i32 4
  %val4 = load i32* %gep4
  %cond4 = icmp ugt i32 %val4, 4
  br i1 %cond4, label %then4, label %else4, !prof !0

then4:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %else4

else4:
  %gep5 = getelementptr i32* %a, i32 3
  %val5 = load i32* %gep5
  %cond5 = icmp ugt i32 %val5, 3
  br i1 %cond5, label %then5, label %exit, !prof !0

then5:
  call void @error(i32 %i, i32 1, i32 %b)
  br label %exit

exit:
  ret i32 %b
}

define i32 @test_loop_cold_blocks(i32 %i, i32* %a) {
; Check that we sink cold loop blocks after the hot loop body.
; CHECK: test_loop_cold_blocks:
; CHECK: %entry
; CHECK: %unlikely1
; CHECK: %unlikely2
; CHECK: %body1
; CHECK: %body2
; CHECK: %body3
; CHECK: %exit

entry:
  br label %body1

body1:
  %iv = phi i32 [ 0, %entry ], [ %next, %body3 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body3 ]
  %unlikelycond1 = icmp slt i32 %base, 42
  br i1 %unlikelycond1, label %unlikely1, label %body2, !prof !0

unlikely1:
  call void @error(i32 %i, i32 1, i32 %base)
  br label %body2

body2:
  %unlikelycond2 = icmp sgt i32 %base, 21
  br i1 %unlikelycond2, label %unlikely2, label %body3, !prof !0

unlikely2:
  call void @error(i32 %i, i32 2, i32 %base)
  br label %body3

body3:
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

exit:
  ret i32 %sum
}

!0 = metadata !{metadata !"branch_weights", i32 4, i32 64}
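; In !0, the first weight belongs to the first (true) successor, so every branch
; tagged '!prof !0' above takes its 'then'/'unlikely' target with probability
; 4/(4+64), i.e. about 6%, marking those blocks as cold.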

define i32 @test_loop_early_exits(i32 %i, i32* %a) {
; Check that we sink early exit blocks out of loop bodies.
; CHECK: test_loop_early_exits:
; CHECK: %entry
; CHECK: %body1
; CHECK: %body2
; CHECK: %body3
; CHECK: %body4
; CHECK: %exit
; CHECK: %bail1
; CHECK: %bail2
; CHECK: %bail3

entry:
  br label %body1

body1:
  %iv = phi i32 [ 0, %entry ], [ %next, %body4 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body4 ]
  %bailcond1 = icmp eq i32 %base, 42
  br i1 %bailcond1, label %bail1, label %body2

bail1:
  ret i32 -1

body2:
  %bailcond2 = icmp eq i32 %base, 43
  br i1 %bailcond2, label %bail2, label %body3

bail2:
  ret i32 -2

body3:
  %bailcond3 = icmp eq i32 %base, 44
  br i1 %bailcond3, label %bail3, label %body4

bail3:
  ret i32 -3

body4:
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

exit:
  ret i32 %sum
}

define i32 @test_loop_rotate(i32 %i, i32* %a) {
; Check that we rotate conditional exits from the loop to the bottom of the
; loop, eliminating unconditional branches to the top.
; CHECK: test_loop_rotate:
; CHECK: %entry
; CHECK: %body1
; CHECK: %body0
; CHECK: %exit

entry:
  br label %body0

body0:
  %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

body1:
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %bailcond1 = icmp eq i32 %sum, 42
  br label %body0

exit:
  ret i32 %base
}

define i32 @test_no_loop_rotate(i32 %i, i32* %a) {
; Check that we don't try to rotate a loop which is already laid out with
; fallthrough opportunities into the top and out of the bottom.
; CHECK: test_no_loop_rotate:
; CHECK: %entry
; CHECK: %body0
; CHECK: %body1
; CHECK: %exit

entry:
  br label %body0

body0:
  %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %bailcond1 = icmp eq i32 %sum, 42
  br i1 %bailcond1, label %exit, label %body1

body1:
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body0

exit:
  ret i32 %base
}

define void @test_loop_rotate_reversed_blocks() {
; This test case (greatly reduced from an Olden benchmark) ensures that the loop
; rotate implementation doesn't assume that loops are laid out in a particular
; order. The first loop will get split into two basic blocks, with the loop
; header coming after the loop latch.
;
; CHECK: test_loop_rotate_reversed_blocks
; CHECK: %entry
; Look for a jump into the middle of the loop, and no branches mid-way.
; CHECK: jmp
; CHECK: %loop1
; CHECK-NOT: j{{\w*}} .LBB{{.*}}
; CHECK: %loop1
; CHECK: je

entry:
  %cond1 = load volatile i1* undef
  br i1 %cond1, label %loop2.preheader, label %loop1

loop1:
  call i32 @f()
  %cond2 = load volatile i1* undef
  br i1 %cond2, label %loop2.preheader, label %loop1

loop2.preheader:
  call i32 @f()
  %cond3 = load volatile i1* undef
  br i1 %cond3, label %exit, label %loop2

loop2:
  call i32 @f()
  %cond4 = load volatile i1* undef
  br i1 %cond4, label %exit, label %loop2

exit:
  ret void
}

define i32 @test_loop_align(i32 %i, i32* %a) {
; Check that we provide basic loop body alignment with the block placement
; pass.
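; The '[[ALIGN:[0-9]+]]' pattern below captures the emitted alignment value into
; a FileCheck variable; test_nested_loop_align then requires the same value via
; '[[ALIGN]]'.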
; CHECK: test_loop_align:
; CHECK: %entry
; CHECK: .align [[ALIGN:[0-9]+]],
; CHECK-NEXT: %body
; CHECK: %exit

entry:
  br label %body

body:
  %iv = phi i32 [ 0, %entry ], [ %next, %body ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body

exit:
  ret i32 %sum
}

define i32 @test_nested_loop_align(i32 %i, i32* %a, i32* %b) {
; Check that we provide nested loop body alignment.
; CHECK: test_nested_loop_align:
; CHECK: %entry
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %loop.body.1
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %inner.loop.body
; CHECK-NOT: .align
; CHECK: %exit

entry:
  br label %loop.body.1

loop.body.1:
  %iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %bidx = load i32* %arrayidx
  br label %inner.loop.body

inner.loop.body:
  %inner.iv = phi i32 [ 0, %loop.body.1 ], [ %inner.next, %inner.loop.body ]
  %base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
  %scaled_idx = mul i32 %bidx, %iv
  %inner.arrayidx = getelementptr inbounds i32* %b, i32 %scaled_idx
  %0 = load i32* %inner.arrayidx
  %sum = add nsw i32 %0, %base
  %inner.next = add i32 %iv, 1
  %inner.exitcond = icmp eq i32 %inner.next, %i
  br i1 %inner.exitcond, label %loop.body.2, label %inner.loop.body

loop.body.2:
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %loop.body.1

exit:
  ret i32 %sum
}

define void @unnatural_cfg1() {
; Test that we can handle a loop with an inner unnatural loop at the end of
; a function. This is a gross CFG reduced out of single-source GCC.
; CHECK: unnatural_cfg1
; CHECK: %entry
; CHECK: %loop.body1
; CHECK: %loop.body2
; CHECK: %loop.body3

entry:
  br label %loop.header

loop.header:
  br label %loop.body1

loop.body1:
  br i1 undef, label %loop.body3, label %loop.body2

loop.body2:
  %ptr = load i32** undef, align 4
  br label %loop.body3

loop.body3:
  %myptr = phi i32* [ %ptr2, %loop.body5 ], [ %ptr, %loop.body2 ], [ undef, %loop.body1 ]
  %bcmyptr = bitcast i32* %myptr to i32*
  %val = load i32* %bcmyptr, align 4
  %comp = icmp eq i32 %val, 48
  br i1 %comp, label %loop.body4, label %loop.body5

loop.body4:
  br i1 undef, label %loop.header, label %loop.body5

loop.body5:
  %ptr2 = load i32** undef, align 4
  br label %loop.body3
}

define void @unnatural_cfg2() {
; Test that we can handle a loop with a nested natural loop *and* an unnatural
; loop. This was reduced from a crash on block placement when run over
; single-source GCC.
; CHECK: unnatural_cfg2
; CHECK: %entry
; CHECK: %loop.body1
; CHECK: %loop.body2
; CHECK: %loop.body3
; CHECK: %loop.inner1.begin
; The end block is folded with %loop.body3...
; CHECK-NOT: %loop.inner1.end
; CHECK: %loop.body4
; CHECK: %loop.inner2.begin
; The loop.inner2.end block is folded
; CHECK: %loop.header
; CHECK: %bail

entry:
  br label %loop.header

loop.header:
  %comp0 = icmp eq i32* undef, null
  br i1 %comp0, label %bail, label %loop.body1

loop.body1:
  %val0 = load i32** undef, align 4
  br i1 undef, label %loop.body2, label %loop.inner1.begin

loop.body2:
  br i1 undef, label %loop.body4, label %loop.body3

loop.body3:
  %ptr1 = getelementptr inbounds i32* %val0, i32 0
  %castptr1 = bitcast i32* %ptr1 to i32**
  %val1 = load i32** %castptr1, align 4
  br label %loop.inner1.begin

loop.inner1.begin:
  %valphi = phi i32* [ %val2, %loop.inner1.end ], [ %val1, %loop.body3 ], [ %val0, %loop.body1 ]
  %castval = bitcast i32* %valphi to i32*
  %comp1 = icmp eq i32 undef, 48
  br i1 %comp1, label %loop.inner1.end, label %loop.body4

loop.inner1.end:
  %ptr2 = getelementptr inbounds i32* %valphi, i32 0
  %castptr2 = bitcast i32* %ptr2 to i32**
  %val2 = load i32** %castptr2, align 4
  br label %loop.inner1.begin

loop.body4.dead:
  br label %loop.body4

loop.body4:
  %comp2 = icmp ult i32 undef, 3
  br i1 %comp2, label %loop.inner2.begin, label %loop.end

loop.inner2.begin:
  br i1 false, label %loop.end, label %loop.inner2.end

loop.inner2.end:
  %comp3 = icmp eq i32 undef, 1769472
  br i1 %comp3, label %loop.end, label %loop.inner2.begin

loop.end:
  br label %loop.header

bail:
  unreachable
}

define i32 @problematic_switch() {
; This function's CFG caused overflow in the machine branch probability
; calculation, triggering asserts. Make sure we don't crash on it.
; CHECK: problematic_switch

entry:
  switch i32 undef, label %exit [
    i32 879, label %bogus
    i32 877, label %step
    i32 876, label %step
    i32 875, label %step
    i32 874, label %step
    i32 873, label %step
    i32 872, label %step
    i32 868, label %step
    i32 867, label %step
    i32 866, label %step
    i32 861, label %step
    i32 860, label %step
    i32 856, label %step
    i32 855, label %step
    i32 854, label %step
    i32 831, label %step
    i32 830, label %step
    i32 829, label %step
    i32 828, label %step
    i32 815, label %step
    i32 814, label %step
    i32 811, label %step
    i32 806, label %step
    i32 805, label %step
    i32 804, label %step
    i32 803, label %step
    i32 802, label %step
    i32 801, label %step
    i32 800, label %step
    i32 799, label %step
    i32 798, label %step
    i32 797, label %step
    i32 796, label %step
    i32 795, label %step
  ]
bogus:
  unreachable
step:
  br label %exit
exit:
  %merge = phi i32 [ 3, %step ], [ 6, %entry ]
  ret i32 %merge
}

define void @fpcmp_unanalyzable_branch(i1 %cond) {
; This function's CFG contains an unanalyzable branch that is likely to be
; split due to having a different high-probability predecessor.
; CHECK: fpcmp_unanalyzable_branch
; CHECK: %entry
; CHECK: %exit
; CHECK-NOT: %if.then
; CHECK-NOT: %if.end
; CHECK-NOT: jne
; CHECK-NOT: jnp
; CHECK: jne
; CHECK-NEXT: jnp
; CHECK-NEXT: %if.then
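; (The fcmp une in %exit lowers on x86 to a pair of conditional jumps, one on ZF
; and one on PF, which is what makes the branch unanalyzable.)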

entry:
; Note that this branch must be strongly biased toward
; 'entry.if.then_crit_edge' to ensure that we would try to form a chain for
; 'entry' -> 'entry.if.then_crit_edge' -> 'if.then'. It is the last edge in that
; chain which would violate the unanalyzable branch in 'exit', but we won't even
; try this trick unless 'if.then' is believed to almost always be reached from
; 'entry.if.then_crit_edge'.
  br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1

entry.if.then_crit_edge:
  %.pre14 = load i8* undef, align 1, !tbaa !0
  br label %if.then

lor.lhs.false:
  br i1 undef, label %if.end, label %exit

exit:
  %cmp.i = fcmp une double 0.000000e+00, undef
  br i1 %cmp.i, label %if.then, label %if.end

if.then:
  %0 = phi i8 [ %.pre14, %entry.if.then_crit_edge ], [ undef, %exit ]
  %1 = and i8 %0, 1
  store i8 %1, i8* undef, align 4, !tbaa !0
  br label %if.end

if.end:
  ret void
}

!1 = metadata !{metadata !"branch_weights", i32 1000, i32 1}
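; A 1000:1 weighting makes branches tagged '!prof !1' almost always take their
; first (true) target.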

declare i32 @f()
declare i32 @g()
declare i32 @h(i32 %x)

define i32 @test_global_cfg_break_profitability() {
; Check that our metrics for the profitability of a CFG break are global rather
; than local. A successor may be very hot, but if the current block isn't, it
; doesn't matter. Within this test the 'then' block is slightly warmer than the
; 'else' block, but not nearly enough to merit merging it with the exit block
; even though the probability of 'then' branching to the 'exit' block is very
; high.
; CHECK: test_global_cfg_break_profitability
; CHECK: calll {{_?}}f
; CHECK: calll {{_?}}g
; CHECK: calll {{_?}}h
; CHECK: ret

entry:
  br i1 undef, label %then, label %else, !prof !2

then:
  %then.result = call i32 @f()
  br label %exit

else:
  %else.result = call i32 @g()
  br label %exit

exit:
  %result = phi i32 [ %then.result, %then ], [ %else.result, %else ]
  %result2 = call i32 @h(i32 %result)
  ret i32 %result
}

!2 = metadata !{metadata !"branch_weights", i32 3, i32 1}
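; A 3:1 weighting leaves 'then' only modestly warmer than 'else', which is not
; enough for the placement pass to consider a global CFG break profitable.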

declare i32 @__gxx_personality_v0(...)

define void @test_eh_lpad_successor() {
; Sometimes the landing pad ends up as the first successor of an invoke block.
; When this happens, a strange result used to fall out of updateTerminators: we
; didn't correctly locate the fallthrough successor, assuming blindly that the
; first one was the fallthrough successor. As a result, we would add an
; erroneous jump to the landing pad thinking *that* was the default successor.
; CHECK: test_eh_lpad_successor
; CHECK: %entry
; CHECK-NOT: jmp
; CHECK: %loop

entry:
  invoke i32 @f() to label %preheader unwind label %lpad

preheader:
  br label %loop

lpad:
  %lpad.val = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
          cleanup
  resume { i8*, i32 } %lpad.val

loop:
  br label %loop
}

declare void @fake_throw() noreturn

define void @test_eh_throw() {
; For blocks containing a 'throw' (or similar functionality), we have
; a no-return invoke. In this case, only EH successors will exist, and
; fallthrough simply won't occur. Make sure we don't crash trying to update
; terminators for such constructs.
;
; CHECK: test_eh_throw
; CHECK: %entry
; CHECK: %cleanup

entry:
  invoke void @fake_throw() to label %continue unwind label %cleanup

continue:
  unreachable

cleanup:
  %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
          cleanup
  unreachable
}

define void @test_unnatural_cfg_backwards_inner_loop() {
; Test that when we encounter an unnatural CFG structure after having formed
; a chain for an inner loop which happened to be laid out backwards we don't
; attempt to merge onto the wrong end of the inner loop just because we find it
; first. This was reduced from a crasher in GCC's single source.
;
; CHECK: test_unnatural_cfg_backwards_inner_loop
; CHECK: %entry
; CHECK: %body
; CHECK: %loop2b
; CHECK: %loop1
; CHECK: %loop2a

entry:
  br i1 undef, label %loop2a, label %body

body:
  br label %loop2a

loop1:
  %next.load = load i32** undef
  br i1 %comp.a, label %loop2a, label %loop2b

loop2a:
  %var = phi i32* [ null, %entry ], [ null, %body ], [ %next.phi, %loop1 ]
  %next.var = phi i32* [ null, %entry ], [ undef, %body ], [ %next.load, %loop1 ]
  %comp.a = icmp eq i32* %var, null
  br label %loop3

loop2b:
  %gep = getelementptr inbounds i32* %var.phi, i32 0
  %next.ptr = bitcast i32* %gep to i32**
  store i32* %next.phi, i32** %next.ptr
  br label %loop3

loop3:
  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]
  br label %loop1
}

define void @unanalyzable_branch_to_loop_header() {
; Ensure that we can handle unanalyzable branches into loop headers. We
; pre-form chains for unanalyzable branches, and will find the tail end of that
; at the start of the loop. This function uses floating point comparison
; fallthrough because that happens to always produce unanalyzable branches on
; x86.
;
; CHECK: unanalyzable_branch_to_loop_header
; CHECK: %entry
; CHECK: %loop
; CHECK: %exit

entry:
  %cmp = fcmp une double 0.000000e+00, undef
  br i1 %cmp, label %loop, label %exit

loop:
  %cond = icmp eq i8 undef, 42
  br i1 %cond, label %exit, label %loop

exit:
  ret void
}

define void @unanalyzable_branch_to_best_succ(i1 %cond) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the optimal successor to merge.
;
; CHECK: unanalyzable_branch_to_best_succ
; CHECK: %entry
; CHECK: %foo
; CHECK: %bar
; CHECK: %exit

entry:
  ; Bias this branch toward bar to ensure we form that chain.
  br i1 %cond, label %bar, label %foo, !prof !1

foo:
  %cmp = fcmp une double 0.000000e+00, undef
  br i1 %cmp, label %bar, label %exit

bar:
  call i32 @f()
  br label %exit

exit:
  ret void
}

define void @unanalyzable_branch_to_free_block(float %x) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the best free block in the CFG.
;
; CHECK: unanalyzable_branch_to_free_block
; CHECK: %entry
; CHECK: %a
; CHECK: %b
; CHECK: %c
; CHECK: %exit

entry:
  br i1 undef, label %a, label %b

a:
  call i32 @f()
  br label %c

b:
  %cmp = fcmp une float %x, undef
  br i1 %cmp, label %c, label %exit

c:
  call i32 @g()
  br label %exit

exit:
  ret void
}

define void @many_unanalyzable_branches() {
; Ensure that we don't crash as we're building up many unanalyzable branches,
; blocks, and loops.
;
; CHECK: many_unanalyzable_branches
; CHECK: %entry
; CHECK: %exit
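; Note: the branch targets below are unnamed blocks; each terminator implicitly
; closes one numbered block (%0, %1, ...) and the next instruction opens the
; next one.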

entry:
  br label %0

  %val0 = load volatile float* undef
  %cmp0 = fcmp une float %val0, undef
  br i1 %cmp0, label %1, label %0
  %val1 = load volatile float* undef
  %cmp1 = fcmp une float %val1, undef
  br i1 %cmp1, label %2, label %1
  %val2 = load volatile float* undef
  %cmp2 = fcmp une float %val2, undef
  br i1 %cmp2, label %3, label %2
  %val3 = load volatile float* undef
  %cmp3 = fcmp une float %val3, undef
  br i1 %cmp3, label %4, label %3
  %val4 = load volatile float* undef
  %cmp4 = fcmp une float %val4, undef
  br i1 %cmp4, label %5, label %4
  %val5 = load volatile float* undef
  %cmp5 = fcmp une float %val5, undef
  br i1 %cmp5, label %6, label %5
  %val6 = load volatile float* undef
  %cmp6 = fcmp une float %val6, undef
  br i1 %cmp6, label %7, label %6
  %val7 = load volatile float* undef
  %cmp7 = fcmp une float %val7, undef
  br i1 %cmp7, label %8, label %7
  %val8 = load volatile float* undef
  %cmp8 = fcmp une float %val8, undef
  br i1 %cmp8, label %9, label %8
  %val9 = load volatile float* undef
  %cmp9 = fcmp une float %val9, undef
  br i1 %cmp9, label %10, label %9
  %val10 = load volatile float* undef
  %cmp10 = fcmp une float %val10, undef
  br i1 %cmp10, label %11, label %10
  %val11 = load volatile float* undef
  %cmp11 = fcmp une float %val11, undef
  br i1 %cmp11, label %12, label %11
  %val12 = load volatile float* undef
  %cmp12 = fcmp une float %val12, undef
  br i1 %cmp12, label %13, label %12
  %val13 = load volatile float* undef
  %cmp13 = fcmp une float %val13, undef
  br i1 %cmp13, label %14, label %13
  %val14 = load volatile float* undef
  %cmp14 = fcmp une float %val14, undef
  br i1 %cmp14, label %15, label %14
  %val15 = load volatile float* undef
  %cmp15 = fcmp une float %val15, undef
  br i1 %cmp15, label %16, label %15
  %val16 = load volatile float* undef
  %cmp16 = fcmp une float %val16, undef
  br i1 %cmp16, label %17, label %16
  %val17 = load volatile float* undef
  %cmp17 = fcmp une float %val17, undef
  br i1 %cmp17, label %18, label %17
  %val18 = load volatile float* undef
  %cmp18 = fcmp une float %val18, undef
  br i1 %cmp18, label %19, label %18
  %val19 = load volatile float* undef
  %cmp19 = fcmp une float %val19, undef
  br i1 %cmp19, label %20, label %19
  %val20 = load volatile float* undef
  %cmp20 = fcmp une float %val20, undef
  br i1 %cmp20, label %21, label %20
  %val21 = load volatile float* undef
  %cmp21 = fcmp une float %val21, undef
  br i1 %cmp21, label %22, label %21
  %val22 = load volatile float* undef
  %cmp22 = fcmp une float %val22, undef
  br i1 %cmp22, label %23, label %22
  %val23 = load volatile float* undef
  %cmp23 = fcmp une float %val23, undef
  br i1 %cmp23, label %24, label %23
  %val24 = load volatile float* undef
  %cmp24 = fcmp une float %val24, undef
  br i1 %cmp24, label %25, label %24
  %val25 = load volatile float* undef
  %cmp25 = fcmp une float %val25, undef
  br i1 %cmp25, label %26, label %25
  %val26 = load volatile float* undef
  %cmp26 = fcmp une float %val26, undef
  br i1 %cmp26, label %27, label %26
  %val27 = load volatile float* undef
  %cmp27 = fcmp une float %val27, undef
  br i1 %cmp27, label %28, label %27
  %val28 = load volatile float* undef
  %cmp28 = fcmp une float %val28, undef
  br i1 %cmp28, label %29, label %28
  %val29 = load volatile float* undef
  %cmp29 = fcmp une float %val29, undef
  br i1 %cmp29, label %30, label %29
  %val30 = load volatile float* undef
  %cmp30 = fcmp une float %val30, undef
  br i1 %cmp30, label %31, label %30
  %val31 = load volatile float* undef
  %cmp31 = fcmp une float %val31, undef
  br i1 %cmp31, label %32, label %31
  %val32 = load volatile float* undef
  %cmp32 = fcmp une float %val32, undef
  br i1 %cmp32, label %33, label %32
  %val33 = load volatile float* undef
  %cmp33 = fcmp une float %val33, undef
  br i1 %cmp33, label %34, label %33
  %val34 = load volatile float* undef
  %cmp34 = fcmp une float %val34, undef
  br i1 %cmp34, label %35, label %34
  %val35 = load volatile float* undef
  %cmp35 = fcmp une float %val35, undef
  br i1 %cmp35, label %36, label %35
  %val36 = load volatile float* undef
  %cmp36 = fcmp une float %val36, undef
  br i1 %cmp36, label %37, label %36
  %val37 = load volatile float* undef
  %cmp37 = fcmp une float %val37, undef
  br i1 %cmp37, label %38, label %37
  %val38 = load volatile float* undef
  %cmp38 = fcmp une float %val38, undef
  br i1 %cmp38, label %39, label %38
  %val39 = load volatile float* undef
  %cmp39 = fcmp une float %val39, undef
  br i1 %cmp39, label %40, label %39
  %val40 = load volatile float* undef
  %cmp40 = fcmp une float %val40, undef
  br i1 %cmp40, label %41, label %40
  %val41 = load volatile float* undef
  %cmp41 = fcmp une float %val41, undef
  br i1 %cmp41, label %42, label %41
  %val42 = load volatile float* undef
  %cmp42 = fcmp une float %val42, undef
  br i1 %cmp42, label %43, label %42
  %val43 = load volatile float* undef
  %cmp43 = fcmp une float %val43, undef
  br i1 %cmp43, label %44, label %43
  %val44 = load volatile float* undef
  %cmp44 = fcmp une float %val44, undef
  br i1 %cmp44, label %45, label %44
  %val45 = load volatile float* undef
  %cmp45 = fcmp une float %val45, undef
  br i1 %cmp45, label %46, label %45
  %val46 = load volatile float* undef
  %cmp46 = fcmp une float %val46, undef
  br i1 %cmp46, label %47, label %46
  %val47 = load volatile float* undef
  %cmp47 = fcmp une float %val47, undef
  br i1 %cmp47, label %48, label %47
  %val48 = load volatile float* undef
  %cmp48 = fcmp une float %val48, undef
  br i1 %cmp48, label %49, label %48
  %val49 = load volatile float* undef
  %cmp49 = fcmp une float %val49, undef
  br i1 %cmp49, label %50, label %49
  %val50 = load volatile float* undef
  %cmp50 = fcmp une float %val50, undef
  br i1 %cmp50, label %51, label %50
  %val51 = load volatile float* undef
  %cmp51 = fcmp une float %val51, undef
  br i1 %cmp51, label %52, label %51
  %val52 = load volatile float* undef
  %cmp52 = fcmp une float %val52, undef
  br i1 %cmp52, label %53, label %52
  %val53 = load volatile float* undef
  %cmp53 = fcmp une float %val53, undef
  br i1 %cmp53, label %54, label %53
  %val54 = load volatile float* undef
  %cmp54 = fcmp une float %val54, undef
  br i1 %cmp54, label %55, label %54
  %val55 = load volatile float* undef
  %cmp55 = fcmp une float %val55, undef
  br i1 %cmp55, label %56, label %55
  %val56 = load volatile float* undef
  %cmp56 = fcmp une float %val56, undef
  br i1 %cmp56, label %57, label %56
  %val57 = load volatile float* undef
  %cmp57 = fcmp une float %val57, undef
  br i1 %cmp57, label %58, label %57
  %val58 = load volatile float* undef
  %cmp58 = fcmp une float %val58, undef
  br i1 %cmp58, label %59, label %58
  %val59 = load volatile float* undef
  %cmp59 = fcmp une float %val59, undef
  br i1 %cmp59, label %60, label %59
  %val60 = load volatile float* undef
  %cmp60 = fcmp une float %val60, undef
  br i1 %cmp60, label %61, label %60
  %val61 = load volatile float* undef
  %cmp61 = fcmp une float %val61, undef
  br i1 %cmp61, label %62, label %61
  %val62 = load volatile float* undef
  %cmp62 = fcmp une float %val62, undef
  br i1 %cmp62, label %63, label %62
  %val63 = load volatile float* undef
  %cmp63 = fcmp une float %val63, undef
  br i1 %cmp63, label %64, label %63
  %val64 = load volatile float* undef
  %cmp64 = fcmp une float %val64, undef
  br i1 %cmp64, label %65, label %64

  br label %exit
exit:
  ret void
}

define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
; This test case comes from the heapsort benchmark, and exemplifies several
; important aspects of block placement in the presence of loops:
; 1) Loop rotation needs to *ensure* that the desired exiting edge can be
;    a fallthrough.
; 2) The exiting edge from the loop which is rotated to be laid out at the
;    bottom of the loop needs to be exiting into the nearest enclosing loop (to
;    which there is an exit). Otherwise, we force that enclosing loop into
;    strange layouts that are significantly less efficient, often making it
;    discontiguous.
;
; CHECK: @benchmark_heapsort
; CHECK: %entry
; First rotated loop top.
; CHECK: .align
; CHECK: %while.end
; CHECK: %for.cond
; CHECK: %if.then
; CHECK: %if.else
; CHECK: %if.end10
; Second rotated loop top
; CHECK: .align
; CHECK: %if.then24
; CHECK: %while.cond.outer
; Third rotated loop top
; CHECK: .align
; CHECK: %while.cond
; CHECK: %while.body
; CHECK: %land.lhs.true
; CHECK: %if.then19
; CHECK: %if.then8
; CHECK: ret

entry:
  %shr = ashr i32 %n, 1
  %add = add nsw i32 %shr, 1
  %arrayidx3 = getelementptr inbounds double* %ra, i64 1
  br label %for.cond

for.cond:
  %ir.0 = phi i32 [ %n, %entry ], [ %ir.1, %while.end ]
  %l.0 = phi i32 [ %add, %entry ], [ %l.1, %while.end ]
  %cmp = icmp sgt i32 %l.0, 1
  br i1 %cmp, label %if.then, label %if.else

if.then:
  %dec = add nsw i32 %l.0, -1
  %idxprom = sext i32 %dec to i64
  %arrayidx = getelementptr inbounds double* %ra, i64 %idxprom
  %0 = load double* %arrayidx, align 8
  br label %if.end10

if.else:
  %idxprom1 = sext i32 %ir.0 to i64
  %arrayidx2 = getelementptr inbounds double* %ra, i64 %idxprom1
  %1 = load double* %arrayidx2, align 8
  %2 = load double* %arrayidx3, align 8
  store double %2, double* %arrayidx2, align 8
  %dec6 = add nsw i32 %ir.0, -1
  %cmp7 = icmp eq i32 %dec6, 1
  br i1 %cmp7, label %if.then8, label %if.end10

if.then8:
  store double %1, double* %arrayidx3, align 8
  ret void

if.end10:
  %ir.1 = phi i32 [ %ir.0, %if.then ], [ %dec6, %if.else ]
  %l.1 = phi i32 [ %dec, %if.then ], [ %l.0, %if.else ]
  %rra.0 = phi double [ %0, %if.then ], [ %1, %if.else ]
  %add31 = add nsw i32 %ir.1, 1
  br label %while.cond.outer

while.cond.outer:
  %j.0.ph.in = phi i32 [ %l.1, %if.end10 ], [ %j.1, %if.then24 ]
  %j.0.ph = shl i32 %j.0.ph.in, 1
  br label %while.cond

while.cond:
  %j.0 = phi i32 [ %add31, %if.end20 ], [ %j.0.ph, %while.cond.outer ]
  %cmp11 = icmp sgt i32 %j.0, %ir.1
  br i1 %cmp11, label %while.end, label %while.body

while.body:
  %cmp12 = icmp slt i32 %j.0, %ir.1
  br i1 %cmp12, label %land.lhs.true, label %if.end20

land.lhs.true:
  %idxprom13 = sext i32 %j.0 to i64
  %arrayidx14 = getelementptr inbounds double* %ra, i64 %idxprom13
  %3 = load double* %arrayidx14, align 8
  %add15 = add nsw i32 %j.0, 1
  %idxprom16 = sext i32 %add15 to i64
  %arrayidx17 = getelementptr inbounds double* %ra, i64 %idxprom16
  %4 = load double* %arrayidx17, align 8
  %cmp18 = fcmp olt double %3, %4
  br i1 %cmp18, label %if.then19, label %if.end20

if.then19:
  br label %if.end20

if.end20:
  %j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
  %idxprom21 = sext i32 %j.1 to i64
  %arrayidx22 = getelementptr inbounds double* %ra, i64 %idxprom21
  %5 = load double* %arrayidx22, align 8
  %cmp23 = fcmp olt double %rra.0, %5
  br i1 %cmp23, label %if.then24, label %while.cond

if.then24:
  %idxprom27 = sext i32 %j.0.ph.in to i64
  %arrayidx28 = getelementptr inbounds double* %ra, i64 %idxprom27
  store double %5, double* %arrayidx28, align 8
  br label %while.cond.outer

while.end:
  %idxprom33 = sext i32 %j.0.ph.in to i64
  %arrayidx34 = getelementptr inbounds double* %ra, i64 %idxprom33
  store double %rra.0, double* %arrayidx34, align 8
  br label %for.cond
}