; RUN: llc -mtriple=i686-linux < %s | FileCheck %s

declare void @error(i32 %i, i32 %a, i32 %b)

define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
; Test a chain of ifs, where the block guarded by the if is error handling code
; that is not expected to run.
; CHECK: test_ifchains:
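;
; As a sketch of the intended layout (not the literal CHECK sequence), the hot
; fallthrough chain entry -> else1 -> ... -> else5 -> exit stays contiguous,
; while the cold then1..then5 error blocks are sunk below it.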
  %gep1 = getelementptr i32* %a, i32 1
  %val1 = load i32* %gep1
  %cond1 = icmp ugt i32 %val1, 1
  br i1 %cond1, label %then1, label %else1, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

  %gep2 = getelementptr i32* %a, i32 2
  %val2 = load i32* %gep2
  %cond2 = icmp ugt i32 %val2, 2
  br i1 %cond2, label %then2, label %else2, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

  %gep3 = getelementptr i32* %a, i32 3
  %val3 = load i32* %gep3
  %cond3 = icmp ugt i32 %val3, 3
  br i1 %cond3, label %then3, label %else3, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

  %gep4 = getelementptr i32* %a, i32 4
  %val4 = load i32* %gep4
  %cond4 = icmp ugt i32 %val4, 4
  br i1 %cond4, label %then4, label %else4, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

  %gep5 = getelementptr i32* %a, i32 3
  %val5 = load i32* %gep5
  %cond5 = icmp ugt i32 %val5, 3
  br i1 %cond5, label %then5, label %exit, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

define i32 @test_loop_cold_blocks(i32 %i, i32* %a) {
; Check that we sink cold loop blocks after the hot loop body.
; CHECK: test_loop_cold_blocks:
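;
; Illustrative sketch: the hot body1 -> body2 -> body3 loop chain should stay
; contiguous, with the cold unlikely1 and unlikely2 blocks placed after the
; loop body.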
  %iv = phi i32 [ 0, %entry ], [ %next, %body3 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body3 ]
  %unlikelycond1 = icmp slt i32 %base, 42
  br i1 %unlikelycond1, label %unlikely1, label %body2, !prof !0

  call void @error(i32 %i, i32 1, i32 %base)

  %unlikelycond2 = icmp sgt i32 %base, 21
  br i1 %unlikelycond2, label %unlikely2, label %body3, !prof !0

  call void @error(i32 %i, i32 2, i32 %base)

  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

!0 = metadata !{metadata !"branch_weights", i32 4, i32 64}
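; With weights 4 and 64, the first successor (the unlikely/error edge above) is
; taken with probability 4/(4+64), roughly 6%, which is what marks those blocks
; as cold.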

define i32 @test_loop_early_exits(i32 %i, i32* %a) {
; Check that we sink early exit blocks out of loop bodies.
; CHECK: test_loop_early_exits:
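;
; Illustrative sketch: body1..body4 form the contiguous loop chain, while the
; bail1..bail3 early-exit blocks are placed outside the loop body.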
  %iv = phi i32 [ 0, %entry ], [ %next, %body4 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body4 ]
  %bailcond1 = icmp eq i32 %base, 42
  br i1 %bailcond1, label %bail1, label %body2

  %bailcond2 = icmp eq i32 %base, 43
  br i1 %bailcond2, label %bail2, label %body3

  %bailcond3 = icmp eq i32 %base, 44
  br i1 %bailcond3, label %bail3, label %body4

  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

define i32 @test_loop_rotate(i32 %i, i32* %a) {
; Check that we rotate conditional exits from the loop to the bottom of the
; loop, eliminating unconditional branches to the top.
; CHECK: test_loop_rotate:
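;
; A plausible rotated order (sketch): entry, body1, body0, exit. body1 then
; falls through into body0, whose conditional branch at the loop bottom either
; jumps back up to body1 or falls through to exit, so no iteration needs an
; unconditional jump to the loop top.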
  %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %bailcond1 = icmp eq i32 %sum, 42

define i32 @test_no_loop_rotate(i32 %i, i32* %a) {
; Check that we don't try to rotate a loop which is already laid out with
; fallthrough opportunities into the top and out of the bottom.
; CHECK: test_no_loop_rotate:
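;
; Sketch: the natural order entry, body0, body1, exit already falls through
; into the loop top and out of its bottom, so rotating here could only add
; branches.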
  %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %bailcond1 = icmp eq i32 %sum, 42
  br i1 %bailcond1, label %exit, label %body1

  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body0

define void @test_loop_rotate_reversed_blocks() {
; This test case (greatly reduced from an Olden benchmark) ensures that the
; loop rotate implementation doesn't assume that loops are laid out in a
; particular order. The first loop will get split into two basic blocks, with
; the loop header coming after the loop latch.
;
; CHECK: test_loop_rotate_reversed_blocks
;
; Look for a jump into the middle of the loop, and no branches mid-way.
; CHECK-NOT: j{{\w*}} .LBB{{.*}}
  %cond1 = load volatile i1* undef
  br i1 %cond1, label %loop2.preheader, label %loop1

  %cond2 = load volatile i1* undef
  br i1 %cond2, label %loop2.preheader, label %loop1

  %cond3 = load volatile i1* undef
  br i1 %cond3, label %exit, label %loop2

  %cond4 = load volatile i1* undef
  br i1 %cond4, label %exit, label %loop2

define i32 @test_loop_align(i32 %i, i32* %a) {
; Check that we provide basic loop body alignment with the block placement
; pass.
; CHECK: test_loop_align:
; CHECK: .align [[ALIGN:[0-9]+]],
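; Whatever alignment value llc emits is captured into the FileCheck variable
; [[ALIGN]] here and reused by the nested-loop test below, which therefore only
; requires that inner loop bodies receive the same alignment.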
  %iv = phi i32 [ 0, %entry ], [ %next, %body ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body

define i32 @test_nested_loop_align(i32 %i, i32* %a, i32* %b) {
; Check that we provide nested loop body alignment.
; CHECK: test_nested_loop_align:
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %loop.body.1
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %inner.loop.body
  br label %loop.body.1

  %iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %bidx = load i32* %arrayidx
  br label %inner.loop.body

  %inner.iv = phi i32 [ 0, %loop.body.1 ], [ %inner.next, %inner.loop.body ]
  %base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
  %scaled_idx = mul i32 %bidx, %iv
  %inner.arrayidx = getelementptr inbounds i32* %b, i32 %scaled_idx
  %0 = load i32* %inner.arrayidx
  %sum = add nsw i32 %0, %base
  %inner.next = add i32 %iv, 1
  %inner.exitcond = icmp eq i32 %inner.next, %i
  br i1 %inner.exitcond, label %loop.body.2, label %inner.loop.body

  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %loop.body.1

define void @unnatural_cfg1() {
; Test that we can handle a loop with an inner unnatural loop at the end of
; a function. This is a gross CFG reduced out of the single source GCC.
; CHECK: unnatural_cfg1

  br label %loop.header

  br i1 undef, label %loop.body3, label %loop.body2

  %ptr = load i32** undef, align 4

  %myptr = phi i32* [ %ptr2, %loop.body5 ], [ %ptr, %loop.body2 ], [ undef, %loop.body1 ]
  %bcmyptr = bitcast i32* %myptr to i32*
  %val = load i32* %bcmyptr, align 4
  %comp = icmp eq i32 %val, 48
  br i1 %comp, label %loop.body4, label %loop.body5

  br i1 undef, label %loop.header, label %loop.body5

  %ptr2 = load i32** undef, align 4

define void @unnatural_cfg2() {
; Test that we can handle a loop with a nested natural loop *and* an unnatural
; loop. This was reduced from a crash on block placement when run over the
; single source GCC.
; CHECK: unnatural_cfg2
; CHECK: %loop.inner1.begin
; The end block is folded with %loop.body3...
; CHECK-NOT: %loop.inner1.end
; CHECK: %loop.inner2.begin
; The loop.inner2.end block is folded
; CHECK: %loop.header
  br label %loop.header

  %comp0 = icmp eq i32* undef, null
  br i1 %comp0, label %bail, label %loop.body1

  %val0 = load i32** undef, align 4
  br i1 undef, label %loop.body2, label %loop.inner1.begin

  br i1 undef, label %loop.body4, label %loop.body3

  %ptr1 = getelementptr inbounds i32* %val0, i32 0
  %castptr1 = bitcast i32* %ptr1 to i32**
  %val1 = load i32** %castptr1, align 4
  br label %loop.inner1.begin

  %valphi = phi i32* [ %val2, %loop.inner1.end ], [ %val1, %loop.body3 ], [ %val0, %loop.body1 ]
  %castval = bitcast i32* %valphi to i32*
  %comp1 = icmp eq i32 undef, 48
  br i1 %comp1, label %loop.inner1.end, label %loop.body4

  %ptr2 = getelementptr inbounds i32* %valphi, i32 0
  %castptr2 = bitcast i32* %ptr2 to i32**
  %val2 = load i32** %castptr2, align 4
  br label %loop.inner1.begin

  %comp2 = icmp ult i32 undef, 3
  br i1 %comp2, label %loop.inner2.begin, label %loop.end

  br i1 false, label %loop.end, label %loop.inner2.end

  %comp3 = icmp eq i32 undef, 1769472
  br i1 %comp3, label %loop.end, label %loop.inner2.begin

  br label %loop.header

define i32 @problematic_switch() {
; This function's CFG caused overflow in the machine branch probability
; calculation, triggering asserts. Make sure we don't crash on it.
; CHECK: problematic_switch

  switch i32 undef, label %exit [
    i32 879, label %bogus

  %merge = phi i32 [ 3, %step ], [ 6, %entry ]

define void @fpcmp_unanalyzable_branch(i1 %cond) {
; This function's CFG contains an unanalyzable branch that is likely to be
; split due to having a different high-probability predecessor.
; CHECK: fpcmp_unanalyzable_branch
; CHECK-NOT: %if.then
; CHECK-NEXT: %if.then

; Note that this branch must be strongly biased toward
; 'entry.if.then_crit_edge' to ensure that we would try to form a chain for
; 'entry' -> 'entry.if.then_crit_edge' -> 'if.then'. It is the last edge in that
; chain which would violate the unanalyzable branch in 'exit', but we won't even
; try this trick unless 'if.then' is believed to almost always be reached from
; 'entry.if.then_crit_edge'.
  br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1

entry.if.then_crit_edge:
  %.pre14 = load i8* undef, align 1, !tbaa !0

  br i1 undef, label %if.end, label %exit

  %cmp.i = fcmp une double 0.000000e+00, undef
  br i1 %cmp.i, label %if.then, label %if.end

  %0 = phi i8 [ %.pre14, %entry.if.then_crit_edge ], [ undef, %exit ]
  store i8 %1, i8* undef, align 4, !tbaa !0

!1 = metadata !{metadata !"branch_weights", i32 1000, i32 1}
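; Weights of 1000:1 bias the branch to roughly 99.9% toward the first
; successor, here 'entry.if.then_crit_edge', giving the strong bias the comment
; above relies on.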

declare i32 @h(i32 %x)

define i32 @test_global_cfg_break_profitability() {
; Check that our metrics for the profitability of a CFG break are global rather
; than local. A successor may be very hot, but if the current block isn't, it
; doesn't matter. Within this test the 'then' block is slightly warmer than the
; 'else' block, but not nearly enough to merit merging it with the exit block
; even though the probability of 'then' branching to the 'exit' block is very
; high.
; CHECK: test_global_cfg_break_profitability
; CHECK: calll {{_?}}f
; CHECK: calll {{_?}}g
; CHECK: calll {{_?}}h

  br i1 undef, label %then, label %else, !prof !2

  %then.result = call i32 @f()

  %else.result = call i32 @g()

  %result = phi i32 [ %then.result, %then ], [ %else.result, %else ]
  %result2 = call i32 @h(i32 %result)

!2 = metadata !{metadata !"branch_weights", i32 3, i32 1}
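; A 3:1 weighting makes 'then' 75% likely versus 25% for 'else': warmer, but
; per the comment above not nearly hot enough to merit a CFG break.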

declare i32 @__gxx_personality_v0(...)

define void @test_eh_lpad_successor() {
; Sometimes the landing pad ends up as the first successor of an invoke block.
; When this happens, a strange result used to fall out of updateTerminators: we
; didn't correctly locate the fallthrough successor, assuming blindly that the
; first one was the fallthrough successor. As a result, we would add an
; erroneous jump to the landing pad thinking *that* was the default successor.
; CHECK: test_eh_lpad_successor

  invoke i32 @f() to label %preheader unwind label %lpad

  %lpad.val = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
          cleanup
  resume { i8*, i32 } %lpad.val

declare void @fake_throw() noreturn

define void @test_eh_throw() {
; For blocks containing a 'throw' (or similar functionality), we have
; a no-return invoke. In this case, only EH successors will exist, and
; fallthrough simply won't occur. Make sure we don't crash trying to update
; terminators for such constructs.
;
; CHECK: test_eh_throw

  invoke void @fake_throw() to label %continue unwind label %cleanup

  %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)

define void @test_unnatural_cfg_backwards_inner_loop() {
; Test that when we encounter an unnatural CFG structure after having formed
; a chain for an inner loop which happened to be laid out backwards, we don't
; attempt to merge onto the wrong end of the inner loop just because we find it
; first. This was reduced from a crasher in GCC's single source.
;
; CHECK: test_unnatural_cfg_backwards_inner_loop

  br i1 undef, label %loop2a, label %body

  %next.load = load i32** undef
  br i1 %comp.a, label %loop2a, label %loop2b

  %var = phi i32* [ null, %entry ], [ null, %body ], [ %next.phi, %loop1 ]
  %next.var = phi i32* [ null, %entry ], [ undef, %body ], [ %next.load, %loop1 ]
  %comp.a = icmp eq i32* %var, null

  %gep = getelementptr inbounds i32* %var.phi, i32 0
  %next.ptr = bitcast i32* %gep to i32**
  store i32* %next.phi, i32** %next.ptr

  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]

define void @unanalyzable_branch_to_loop_header() {
; Ensure that we can handle unanalyzable branches into loop headers. We
; pre-form chains for unanalyzable branches, and will find the tail end of that
; chain at the start of the loop. This function uses floating point comparison
; fallthrough because that happens to always produce unanalyzable branches on
; x86.
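;
; (On x86, 'fcmp une' typically lowers to a compare followed by two conditional
; jumps, e.g. JNE plus JP to catch the unordered case, a terminator shape that
; AnalyzeBranch cannot model.)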
; CHECK: unanalyzable_branch_to_loop_header

  %cmp = fcmp une double 0.000000e+00, undef
  br i1 %cmp, label %loop, label %exit

  %cond = icmp eq i8 undef, 42
  br i1 %cond, label %exit, label %loop

define void @unanalyzable_branch_to_best_succ(i1 %cond) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the optimal successor to merge.
;
; CHECK: unanalyzable_branch_to_best_succ

; Bias this branch toward bar to ensure we form that chain.
  br i1 %cond, label %bar, label %foo, !prof !1

  %cmp = fcmp une double 0.000000e+00, undef
  br i1 %cmp, label %bar, label %exit

define void @unanalyzable_branch_to_free_block(float %x) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the best free block in the CFG.
;
; CHECK: unanalyzable_branch_to_free_block

  br i1 undef, label %a, label %b

  %cmp = fcmp une float %x, undef
  br i1 %cmp, label %c, label %exit

define void @many_unanalyzable_branches() {
; Ensure that we don't crash as we're building up many unanalyzable branches,
; blocks, and loops.
;
; CHECK: many_unanalyzable_branches
  %val0 = load volatile float* undef
  %cmp0 = fcmp une float %val0, undef
  br i1 %cmp0, label %1, label %0
  %val1 = load volatile float* undef
  %cmp1 = fcmp une float %val1, undef
  br i1 %cmp1, label %2, label %1
  %val2 = load volatile float* undef
  %cmp2 = fcmp une float %val2, undef
  br i1 %cmp2, label %3, label %2
  %val3 = load volatile float* undef
  %cmp3 = fcmp une float %val3, undef
  br i1 %cmp3, label %4, label %3
  %val4 = load volatile float* undef
  %cmp4 = fcmp une float %val4, undef
  br i1 %cmp4, label %5, label %4
  %val5 = load volatile float* undef
  %cmp5 = fcmp une float %val5, undef
  br i1 %cmp5, label %6, label %5
  %val6 = load volatile float* undef
  %cmp6 = fcmp une float %val6, undef
  br i1 %cmp6, label %7, label %6
  %val7 = load volatile float* undef
  %cmp7 = fcmp une float %val7, undef
  br i1 %cmp7, label %8, label %7
  %val8 = load volatile float* undef
  %cmp8 = fcmp une float %val8, undef
  br i1 %cmp8, label %9, label %8
  %val9 = load volatile float* undef
  %cmp9 = fcmp une float %val9, undef
  br i1 %cmp9, label %10, label %9
  %val10 = load volatile float* undef
  %cmp10 = fcmp une float %val10, undef
  br i1 %cmp10, label %11, label %10
  %val11 = load volatile float* undef
  %cmp11 = fcmp une float %val11, undef
  br i1 %cmp11, label %12, label %11
  %val12 = load volatile float* undef
  %cmp12 = fcmp une float %val12, undef
  br i1 %cmp12, label %13, label %12
  %val13 = load volatile float* undef
  %cmp13 = fcmp une float %val13, undef
  br i1 %cmp13, label %14, label %13
  %val14 = load volatile float* undef
  %cmp14 = fcmp une float %val14, undef
  br i1 %cmp14, label %15, label %14
  %val15 = load volatile float* undef
  %cmp15 = fcmp une float %val15, undef
  br i1 %cmp15, label %16, label %15
  %val16 = load volatile float* undef
  %cmp16 = fcmp une float %val16, undef
  br i1 %cmp16, label %17, label %16
  %val17 = load volatile float* undef
  %cmp17 = fcmp une float %val17, undef
  br i1 %cmp17, label %18, label %17
  %val18 = load volatile float* undef
  %cmp18 = fcmp une float %val18, undef
  br i1 %cmp18, label %19, label %18
  %val19 = load volatile float* undef
  %cmp19 = fcmp une float %val19, undef
  br i1 %cmp19, label %20, label %19
  %val20 = load volatile float* undef
  %cmp20 = fcmp une float %val20, undef
  br i1 %cmp20, label %21, label %20
  %val21 = load volatile float* undef
  %cmp21 = fcmp une float %val21, undef
  br i1 %cmp21, label %22, label %21
  %val22 = load volatile float* undef
  %cmp22 = fcmp une float %val22, undef
  br i1 %cmp22, label %23, label %22
  %val23 = load volatile float* undef
  %cmp23 = fcmp une float %val23, undef
  br i1 %cmp23, label %24, label %23
  %val24 = load volatile float* undef
  %cmp24 = fcmp une float %val24, undef
  br i1 %cmp24, label %25, label %24
  %val25 = load volatile float* undef
  %cmp25 = fcmp une float %val25, undef
  br i1 %cmp25, label %26, label %25
  %val26 = load volatile float* undef
  %cmp26 = fcmp une float %val26, undef
  br i1 %cmp26, label %27, label %26
  %val27 = load volatile float* undef
  %cmp27 = fcmp une float %val27, undef
  br i1 %cmp27, label %28, label %27
  %val28 = load volatile float* undef
  %cmp28 = fcmp une float %val28, undef
  br i1 %cmp28, label %29, label %28
  %val29 = load volatile float* undef
  %cmp29 = fcmp une float %val29, undef
  br i1 %cmp29, label %30, label %29
  %val30 = load volatile float* undef
  %cmp30 = fcmp une float %val30, undef
  br i1 %cmp30, label %31, label %30
  %val31 = load volatile float* undef
  %cmp31 = fcmp une float %val31, undef
  br i1 %cmp31, label %32, label %31
  %val32 = load volatile float* undef
  %cmp32 = fcmp une float %val32, undef
  br i1 %cmp32, label %33, label %32
  %val33 = load volatile float* undef
  %cmp33 = fcmp une float %val33, undef
  br i1 %cmp33, label %34, label %33
  %val34 = load volatile float* undef
  %cmp34 = fcmp une float %val34, undef
  br i1 %cmp34, label %35, label %34
  %val35 = load volatile float* undef
  %cmp35 = fcmp une float %val35, undef
  br i1 %cmp35, label %36, label %35
  %val36 = load volatile float* undef
  %cmp36 = fcmp une float %val36, undef
  br i1 %cmp36, label %37, label %36
  %val37 = load volatile float* undef
  %cmp37 = fcmp une float %val37, undef
  br i1 %cmp37, label %38, label %37
  %val38 = load volatile float* undef
  %cmp38 = fcmp une float %val38, undef
  br i1 %cmp38, label %39, label %38
  %val39 = load volatile float* undef
  %cmp39 = fcmp une float %val39, undef
  br i1 %cmp39, label %40, label %39
  %val40 = load volatile float* undef
  %cmp40 = fcmp une float %val40, undef
  br i1 %cmp40, label %41, label %40
  %val41 = load volatile float* undef
  %cmp41 = fcmp une float %val41, undef
  br i1 %cmp41, label %42, label %41
  %val42 = load volatile float* undef
  %cmp42 = fcmp une float %val42, undef
  br i1 %cmp42, label %43, label %42
  %val43 = load volatile float* undef
  %cmp43 = fcmp une float %val43, undef
  br i1 %cmp43, label %44, label %43
  %val44 = load volatile float* undef
  %cmp44 = fcmp une float %val44, undef
  br i1 %cmp44, label %45, label %44
  %val45 = load volatile float* undef
  %cmp45 = fcmp une float %val45, undef
  br i1 %cmp45, label %46, label %45
  %val46 = load volatile float* undef
  %cmp46 = fcmp une float %val46, undef
  br i1 %cmp46, label %47, label %46
  %val47 = load volatile float* undef
  %cmp47 = fcmp une float %val47, undef
  br i1 %cmp47, label %48, label %47
  %val48 = load volatile float* undef
  %cmp48 = fcmp une float %val48, undef
  br i1 %cmp48, label %49, label %48
  %val49 = load volatile float* undef
  %cmp49 = fcmp une float %val49, undef
  br i1 %cmp49, label %50, label %49
  %val50 = load volatile float* undef
  %cmp50 = fcmp une float %val50, undef
  br i1 %cmp50, label %51, label %50
  %val51 = load volatile float* undef
  %cmp51 = fcmp une float %val51, undef
  br i1 %cmp51, label %52, label %51
  %val52 = load volatile float* undef
  %cmp52 = fcmp une float %val52, undef
  br i1 %cmp52, label %53, label %52
  %val53 = load volatile float* undef
  %cmp53 = fcmp une float %val53, undef
  br i1 %cmp53, label %54, label %53
  %val54 = load volatile float* undef
  %cmp54 = fcmp une float %val54, undef
  br i1 %cmp54, label %55, label %54
  %val55 = load volatile float* undef
  %cmp55 = fcmp une float %val55, undef
  br i1 %cmp55, label %56, label %55
  %val56 = load volatile float* undef
  %cmp56 = fcmp une float %val56, undef
  br i1 %cmp56, label %57, label %56
  %val57 = load volatile float* undef
  %cmp57 = fcmp une float %val57, undef
  br i1 %cmp57, label %58, label %57
  %val58 = load volatile float* undef
  %cmp58 = fcmp une float %val58, undef
  br i1 %cmp58, label %59, label %58
  %val59 = load volatile float* undef
  %cmp59 = fcmp une float %val59, undef
  br i1 %cmp59, label %60, label %59
  %val60 = load volatile float* undef
  %cmp60 = fcmp une float %val60, undef
  br i1 %cmp60, label %61, label %60
  %val61 = load volatile float* undef
  %cmp61 = fcmp une float %val61, undef
  br i1 %cmp61, label %62, label %61
  %val62 = load volatile float* undef
  %cmp62 = fcmp une float %val62, undef
  br i1 %cmp62, label %63, label %62
  %val63 = load volatile float* undef
  %cmp63 = fcmp une float %val63, undef
  br i1 %cmp63, label %64, label %63
  %val64 = load volatile float* undef
  %cmp64 = fcmp une float %val64, undef
  br i1 %cmp64, label %65, label %64

define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
; This test case comes from the heapsort benchmark, and exemplifies several
; important aspects of block placement in the presence of loops:
; 1) Loop rotation needs to *ensure* that the desired exiting edge can be
;    a fallthrough.
; 2) The exiting edge from the loop which is rotated to be laid out at the
;    bottom of the loop needs to be exiting into the nearest enclosing loop (to
;    which there is an exit). Otherwise, we force that enclosing loop into
;    strange layouts that are significantly less efficient, oftentimes making
;    it discontiguous.
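;
; Under those constraints, a plausible layout (sketch only): each of the three
; rotated loops below ends with its conditional exiting branch, so that exit
; falls through into the nearest enclosing loop rather than jumping over it.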
; CHECK: @benchmark_heapsort
; First rotated loop top.
; Second rotated loop top.
; CHECK: %while.cond.outer
; Third rotated loop top.
; CHECK: %land.lhs.true
  %shr = ashr i32 %n, 1
  %add = add nsw i32 %shr, 1
  %arrayidx3 = getelementptr inbounds double* %ra, i64 1

  %ir.0 = phi i32 [ %n, %entry ], [ %ir.1, %while.end ]
  %l.0 = phi i32 [ %add, %entry ], [ %l.1, %while.end ]
  %cmp = icmp sgt i32 %l.0, 1
  br i1 %cmp, label %if.then, label %if.else

  %dec = add nsw i32 %l.0, -1
  %idxprom = sext i32 %dec to i64
  %arrayidx = getelementptr inbounds double* %ra, i64 %idxprom
  %0 = load double* %arrayidx, align 8

  %idxprom1 = sext i32 %ir.0 to i64
  %arrayidx2 = getelementptr inbounds double* %ra, i64 %idxprom1
  %1 = load double* %arrayidx2, align 8
  %2 = load double* %arrayidx3, align 8
  store double %2, double* %arrayidx2, align 8
  %dec6 = add nsw i32 %ir.0, -1
  %cmp7 = icmp eq i32 %dec6, 1
  br i1 %cmp7, label %if.then8, label %if.end10

  store double %1, double* %arrayidx3, align 8

  %ir.1 = phi i32 [ %ir.0, %if.then ], [ %dec6, %if.else ]
  %l.1 = phi i32 [ %dec, %if.then ], [ %l.0, %if.else ]
  %rra.0 = phi double [ %0, %if.then ], [ %1, %if.else ]
  %add31 = add nsw i32 %ir.1, 1
  br label %while.cond.outer

  %j.0.ph.in = phi i32 [ %l.1, %if.end10 ], [ %j.1, %if.then24 ]
  %j.0.ph = shl i32 %j.0.ph.in, 1
  br label %while.cond

  %j.0 = phi i32 [ %add31, %if.end20 ], [ %j.0.ph, %while.cond.outer ]
  %cmp11 = icmp sgt i32 %j.0, %ir.1
  br i1 %cmp11, label %while.end, label %while.body

  %cmp12 = icmp slt i32 %j.0, %ir.1
  br i1 %cmp12, label %land.lhs.true, label %if.end20

  %idxprom13 = sext i32 %j.0 to i64
  %arrayidx14 = getelementptr inbounds double* %ra, i64 %idxprom13
  %3 = load double* %arrayidx14, align 8
  %add15 = add nsw i32 %j.0, 1
  %idxprom16 = sext i32 %add15 to i64
  %arrayidx17 = getelementptr inbounds double* %ra, i64 %idxprom16
  %4 = load double* %arrayidx17, align 8
  %cmp18 = fcmp olt double %3, %4
  br i1 %cmp18, label %if.then19, label %if.end20

  %j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
  %idxprom21 = sext i32 %j.1 to i64
  %arrayidx22 = getelementptr inbounds double* %ra, i64 %idxprom21
  %5 = load double* %arrayidx22, align 8
  %cmp23 = fcmp olt double %rra.0, %5
  br i1 %cmp23, label %if.then24, label %while.cond

  %idxprom27 = sext i32 %j.0.ph.in to i64
  %arrayidx28 = getelementptr inbounds double* %ra, i64 %idxprom27
  store double %5, double* %arrayidx28, align 8
  br label %while.cond.outer

  %idxprom33 = sext i32 %j.0.ph.in to i64
  %arrayidx34 = getelementptr inbounds double* %ra, i64 %idxprom33
  store double %rra.0, double* %arrayidx34, align 8