; RUN: llc -mtriple=i686-linux -pre-RA-sched=source < %s | FileCheck %s

declare void @error(i32 %i, i32 %a, i32 %b)

define i32 @test_ifchains(i32 %i, i32* %a, i32 %b) {
; Test a chain of ifs, where the block guarded by each if is error-handling
; code that is not expected to run.
; CHECK-LABEL: test_ifchains:

  %gep1 = getelementptr i32* %a, i32 1
  %val1 = load i32* %gep1
  %cond1 = icmp ugt i32 %val1, 1
  br i1 %cond1, label %then1, label %else1, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

  %gep2 = getelementptr i32* %a, i32 2
  %val2 = load i32* %gep2
  %cond2 = icmp ugt i32 %val2, 2
  br i1 %cond2, label %then2, label %else2, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

  %gep3 = getelementptr i32* %a, i32 3
  %val3 = load i32* %gep3
  %cond3 = icmp ugt i32 %val3, 3
  br i1 %cond3, label %then3, label %else3, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

  %gep4 = getelementptr i32* %a, i32 4
  %val4 = load i32* %gep4
  %cond4 = icmp ugt i32 %val4, 4
  br i1 %cond4, label %then4, label %else4, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

  %gep5 = getelementptr i32* %a, i32 5
  %val5 = load i32* %gep5
  %cond5 = icmp ugt i32 %val5, 5
  br i1 %cond5, label %then5, label %exit, !prof !0

  call void @error(i32 %i, i32 1, i32 %b)

define i32 @test_loop_cold_blocks(i32 %i, i32* %a) {
; Check that we sink cold loop blocks after the hot loop body.
; CHECK-LABEL: test_loop_cold_blocks:

  %iv = phi i32 [ 0, %entry ], [ %next, %body3 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body3 ]
  %unlikelycond1 = icmp slt i32 %base, 42
  br i1 %unlikelycond1, label %unlikely1, label %body2, !prof !0

  call void @error(i32 %i, i32 1, i32 %base)

  %unlikelycond2 = icmp sgt i32 %base, 21
  br i1 %unlikelycond2, label %unlikely2, label %body3, !prof !0

  call void @error(i32 %i, i32 2, i32 %base)

  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

!0 = metadata !{metadata !"branch_weights", i32 4, i32 64}
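; Note: branch_weights apply to a br's successors in order, so !0 gives the
; first (true) successor weight 4 and the second weight 64, marking the
; 'then'/'unlikely' error blocks above as cold.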

define i32 @test_loop_early_exits(i32 %i, i32* %a) {
; Check that we sink early exit blocks out of loop bodies.
; CHECK-LABEL: test_loop_early_exits:

  %iv = phi i32 [ 0, %entry ], [ %next, %body4 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body4 ]
  %bailcond1 = icmp eq i32 %base, 42
  br i1 %bailcond1, label %bail1, label %body2

  %bailcond2 = icmp eq i32 %base, 43
  br i1 %bailcond2, label %bail2, label %body3

  %bailcond3 = icmp eq i32 %base, 44
  br i1 %bailcond3, label %bail3, label %body4

  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

define i32 @test_loop_rotate(i32 %i, i32* %a) {
; Check that we rotate conditional exits from the loop to the bottom of the
; loop, eliminating unconditional branches to the top.
; CHECK-LABEL: test_loop_rotate:

  %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body1

  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %bailcond1 = icmp eq i32 %sum, 42

define i32 @test_no_loop_rotate(i32 %i, i32* %a) {
; Check that we don't try to rotate a loop which is already laid out with
; fallthrough opportunities into the top and out of the bottom.
; CHECK-LABEL: test_no_loop_rotate:

  %iv = phi i32 [ 0, %entry ], [ %next, %body1 ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body1 ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %bailcond1 = icmp eq i32 %sum, 42
  br i1 %bailcond1, label %exit, label %body1

  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body0

define void @test_loop_rotate_reversed_blocks() {
; This test case (greatly reduced from an Olden benchmark) ensures that the loop
; rotate implementation doesn't assume that loops are laid out in a particular
; order. The first loop will get split into two basic blocks, with the loop
; header coming after the loop latch.
; CHECK: test_loop_rotate_reversed_blocks
; Look for a jump into the middle of the loop, and no branches mid-way.
; CHECK-NOT: j{{\w*}} .LBB{{.*}}
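; (In the FileCheck pattern above, j{{\w*}} matches any jump mnemonic and
; .LBB{{.*}} any local block label, so no branch may appear between the
; surrounding CHECK matches.)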

  %cond1 = load volatile i1* undef
  br i1 %cond1, label %loop2.preheader, label %loop1

  %cond2 = load volatile i1* undef
  br i1 %cond2, label %loop2.preheader, label %loop1

  %cond3 = load volatile i1* undef
  br i1 %cond3, label %exit, label %loop2

  %cond4 = load volatile i1* undef
  br i1 %cond4, label %exit, label %loop2

define i32 @test_loop_align(i32 %i, i32* %a) {
; Check that we provide basic loop body alignment with the block placement
; pass.
; CHECK-LABEL: test_loop_align:
; CHECK: .align [[ALIGN:[0-9]+]],
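; ([[ALIGN:[0-9]+]] captures the alignment operand into the FileCheck variable
; ALIGN, so test_nested_loop_align below can require the same value via
; [[ALIGN]].)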

  %iv = phi i32 [ 0, %entry ], [ %next, %body ]
  %base = phi i32 [ 0, %entry ], [ %sum, %body ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %0 = load i32* %arrayidx
  %sum = add nsw i32 %0, %base
  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %body

define i32 @test_nested_loop_align(i32 %i, i32* %a, i32* %b) {
; Check that we provide nested loop body alignment.
; CHECK-LABEL: test_nested_loop_align:
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %loop.body.1
; CHECK: .align [[ALIGN]],
; CHECK-NEXT: %inner.loop.body

  br label %loop.body.1

  %iv = phi i32 [ 0, %entry ], [ %next, %loop.body.2 ]
  %arrayidx = getelementptr inbounds i32* %a, i32 %iv
  %bidx = load i32* %arrayidx
  br label %inner.loop.body

  %inner.iv = phi i32 [ 0, %loop.body.1 ], [ %inner.next, %inner.loop.body ]
  %base = phi i32 [ 0, %loop.body.1 ], [ %sum, %inner.loop.body ]
  %scaled_idx = mul i32 %bidx, %iv
  %inner.arrayidx = getelementptr inbounds i32* %b, i32 %scaled_idx
  %0 = load i32* %inner.arrayidx
  %sum = add nsw i32 %0, %base
  %inner.next = add i32 %inner.iv, 1
  %inner.exitcond = icmp eq i32 %inner.next, %i
  br i1 %inner.exitcond, label %loop.body.2, label %inner.loop.body

  %next = add i32 %iv, 1
  %exitcond = icmp eq i32 %next, %i
  br i1 %exitcond, label %exit, label %loop.body.1

define void @unnatural_cfg1() {
; Test that we can handle a loop with an inner unnatural loop at the end of
; a function. This is a gross CFG reduced out of single-source GCC.
; CHECK: unnatural_cfg1

  br label %loop.header

  br i1 undef, label %loop.body3, label %loop.body2

  %ptr = load i32** undef, align 4

  %myptr = phi i32* [ %ptr2, %loop.body5 ], [ %ptr, %loop.body2 ], [ undef, %loop.body1 ]
  %bcmyptr = bitcast i32* %myptr to i32*
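  ; (This i32*-to-i32* bitcast is a no-op, presumably residue of the test-case
  ; reduction; it is kept because only the CFG shape matters here.)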
  %val = load i32* %bcmyptr, align 4
  %comp = icmp eq i32 %val, 48
  br i1 %comp, label %loop.body4, label %loop.body5

  br i1 undef, label %loop.header, label %loop.body5

  %ptr2 = load i32** undef, align 4

define void @unnatural_cfg2() {
; Test that we can handle a loop with a nested natural loop *and* an unnatural
; loop. This was reduced from a crash in block placement when run over
; single-source GCC.
; CHECK: unnatural_cfg2
; CHECK: %loop.inner1.begin
; The end block is folded with %loop.body3...
; CHECK-NOT: %loop.inner1.end
; CHECK: %loop.inner2.begin
; The loop.inner2.end block is folded.
; CHECK: %loop.header

  br label %loop.header

  %comp0 = icmp eq i32* undef, null
  br i1 %comp0, label %bail, label %loop.body1

  %val0 = load i32** undef, align 4
  br i1 undef, label %loop.body2, label %loop.inner1.begin

  br i1 undef, label %loop.body4, label %loop.body3

  %ptr1 = getelementptr inbounds i32* %val0, i32 0
  %castptr1 = bitcast i32* %ptr1 to i32**
  %val1 = load i32** %castptr1, align 4
  br label %loop.inner1.begin

  %valphi = phi i32* [ %val2, %loop.inner1.end ], [ %val1, %loop.body3 ], [ %val0, %loop.body1 ]
  %castval = bitcast i32* %valphi to i32*
  %comp1 = icmp eq i32 undef, 48
  br i1 %comp1, label %loop.inner1.end, label %loop.body4

  %ptr2 = getelementptr inbounds i32* %valphi, i32 0
  %castptr2 = bitcast i32* %ptr2 to i32**
  %val2 = load i32** %castptr2, align 4
  br label %loop.inner1.begin

  %comp2 = icmp ult i32 undef, 3
  br i1 %comp2, label %loop.inner2.begin, label %loop.end

  br i1 false, label %loop.end, label %loop.inner2.end

  %comp3 = icmp eq i32 undef, 1769472
  br i1 %comp3, label %loop.end, label %loop.inner2.begin

  br label %loop.header

define i32 @problematic_switch() {
; This function's CFG caused an overflow in the machine branch probability
; calculation, triggering asserts. Make sure we don't crash on it.
; CHECK: problematic_switch

  switch i32 undef, label %exit [
    i32 879, label %bogus

  %merge = phi i32 [ 3, %step ], [ 6, %entry ]

define void @fpcmp_unanalyzable_branch(i1 %cond) {
; This function's CFG contains an unanalyzable branch that is likely to be
; split due to having a different high-probability predecessor.
; CHECK: fpcmp_unanalyzable_branch
; CHECK-NOT: %if.then
; CHECK-NEXT: %if.then

; Note that this branch must be strongly biased toward
; 'entry.if.then_crit_edge' to ensure that we would try to form a chain for
; 'entry' -> 'entry.if.then_crit_edge' -> 'if.then'. It is the last edge in that
; chain which would violate the unanalyzable branch in 'exit', but we won't even
; try this trick unless 'if.then' is believed to almost always be reached from
; 'entry.if.then_crit_edge'.
  br i1 %cond, label %entry.if.then_crit_edge, label %lor.lhs.false, !prof !1

entry.if.then_crit_edge:
  %.pre14 = load i8* undef, align 1

  br i1 undef, label %if.end, label %exit

  %cmp.i = fcmp une double 0.000000e+00, undef
  br i1 %cmp.i, label %if.then, label %if.end

  %0 = phi i8 [ %.pre14, %entry.if.then_crit_edge ], [ undef, %exit ]
  store i8 %1, i8* undef, align 4

!1 = metadata !{metadata !"branch_weights", i32 1000, i32 1}
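; !1 weights the annotated branch 1000:1 in favor of its first (true)
; successor, giving the strong bias the comments above rely on.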

declare i32 @h(i32 %x)

define i32 @test_global_cfg_break_profitability() {
; Check that our metrics for the profitability of a CFG break are global rather
; than local. A successor may be very hot, but if the current block isn't, it
; doesn't matter. Within this test the 'then' block is slightly warmer than the
; 'else' block, but not nearly enough to merit merging it with the exit block
; even though the probability of 'then' branching to the 'exit' block is very
; high.
; CHECK: test_global_cfg_break_profitability
; CHECK: calll {{_?}}f
; CHECK: calll {{_?}}g
; CHECK: calll {{_?}}h

  br i1 undef, label %then, label %else, !prof !2

  %then.result = call i32 @f()

  %else.result = call i32 @g()

  %result = phi i32 [ %then.result, %then ], [ %else.result, %else ]
  %result2 = call i32 @h(i32 %result)

!2 = metadata !{metadata !"branch_weights", i32 3, i32 1}
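; !2 makes 'then' only three times as likely as 'else': slightly warmer, but
; not enough for a profitable CFG break at the exit block.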

declare i32 @__gxx_personality_v0(...)

define void @test_eh_lpad_successor() {
; Sometimes the landing pad ends up as the first successor of an invoke block.
; When this happens, a strange result used to fall out of updateTerminators: we
; didn't correctly locate the fallthrough successor, blindly assuming that the
; first one was the fallthrough successor. As a result, we would add an
; erroneous jump to the landing pad thinking *that* was the default successor.
; CHECK: test_eh_lpad_successor

  invoke i32 @f() to label %preheader unwind label %lpad

  %lpad.val = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)
  resume { i8*, i32 } %lpad.val
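  ; 'resume' continues unwinding the in-flight exception, so this landing-pad
  ; block ends with no normal successor to fall through to.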

declare void @fake_throw() noreturn

define void @test_eh_throw() {
; For blocks containing a 'throw' (or similar functionality), we have
; a no-return invoke. In this case, only EH successors will exist, and
; fallthrough simply won't occur. Make sure we don't crash trying to update
; terminators for such constructs.
; CHECK: test_eh_throw

  invoke void @fake_throw() to label %continue unwind label %cleanup

  %0 = landingpad { i8*, i32 } personality i8* bitcast (i32 (...)* @__gxx_personality_v0 to i8*)

define void @test_unnatural_cfg_backwards_inner_loop() {
; Test that when we encounter an unnatural CFG structure after having formed
; a chain for an inner loop which happened to be laid out backwards, we don't
; attempt to merge onto the wrong end of the inner loop just because we find it
; first. This was reduced from a crasher in single-source GCC.
; CHECK: test_unnatural_cfg_backwards_inner_loop
; CHECK: [[BODY:# BB#[0-9]+]]:
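; ([[BODY:# BB#[0-9]+]] matches and captures the asm comment this older x86
; backend prints for an unnamed machine basic block, e.g. "# BB#1:".)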

  br i1 undef, label %loop2a, label %body

  %next.load = load i32** undef
  br i1 %comp.a, label %loop2a, label %loop2b

  %var = phi i32* [ null, %entry ], [ null, %body ], [ %next.phi, %loop1 ]
  %next.var = phi i32* [ null, %entry ], [ undef, %body ], [ %next.load, %loop1 ]
  %comp.a = icmp eq i32* %var, null

  %gep = getelementptr inbounds i32* %var.phi, i32 0
  %next.ptr = bitcast i32* %gep to i32**
  store i32* %next.phi, i32** %next.ptr

  %var.phi = phi i32* [ %next.phi, %loop2b ], [ %var, %loop2a ]
  %next.phi = phi i32* [ %next.load, %loop2b ], [ %next.var, %loop2a ]

define void @unanalyzable_branch_to_loop_header() {
; Ensure that we can handle unanalyzable branches into loop headers. We
; pre-form chains for unanalyzable branches, and will find the tail end of that
; chain at the start of the loop. This function uses floating point comparison
; fallthrough because that happens to always produce unanalyzable branches on
; x86.
; CHECK: unanalyzable_branch_to_loop_header

  %cmp = fcmp une double 0.000000e+00, undef
  br i1 %cmp, label %loop, label %exit

  %cond = icmp eq i8 undef, 42
  br i1 %cond, label %exit, label %loop

define void @unanalyzable_branch_to_best_succ(i1 %cond) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the optimal successor to merge.
; CHECK: unanalyzable_branch_to_best_succ

  ; Bias this branch toward bar to ensure we form that chain.
  br i1 %cond, label %bar, label %foo, !prof !1

  %cmp = fcmp une double 0.000000e+00, undef
  br i1 %cmp, label %bar, label %exit

define void @unanalyzable_branch_to_free_block(float %x) {
; Ensure that we can handle unanalyzable branches where the destination block
; gets selected as the best free block in the CFG.
; CHECK: unanalyzable_branch_to_free_block

  br i1 undef, label %a, label %b

  %cmp = fcmp une float %x, undef
  br i1 %cmp, label %c, label %exit

define void @many_unanalyzable_branches() {
; Ensure that we don't crash as we're building up many unanalyzable branches,
; blocks, and loops.
; CHECK: many_unanalyzable_branches

  %val0 = load volatile float* undef
  %cmp0 = fcmp une float %val0, undef
  br i1 %cmp0, label %1, label %0

  %val1 = load volatile float* undef
  %cmp1 = fcmp une float %val1, undef
  br i1 %cmp1, label %2, label %1

  %val2 = load volatile float* undef
  %cmp2 = fcmp une float %val2, undef
  br i1 %cmp2, label %3, label %2

  %val3 = load volatile float* undef
  %cmp3 = fcmp une float %val3, undef
  br i1 %cmp3, label %4, label %3

  %val4 = load volatile float* undef
  %cmp4 = fcmp une float %val4, undef
  br i1 %cmp4, label %5, label %4

  %val5 = load volatile float* undef
  %cmp5 = fcmp une float %val5, undef
  br i1 %cmp5, label %6, label %5

  %val6 = load volatile float* undef
  %cmp6 = fcmp une float %val6, undef
  br i1 %cmp6, label %7, label %6

  %val7 = load volatile float* undef
  %cmp7 = fcmp une float %val7, undef
  br i1 %cmp7, label %8, label %7

  %val8 = load volatile float* undef
  %cmp8 = fcmp une float %val8, undef
  br i1 %cmp8, label %9, label %8

  %val9 = load volatile float* undef
  %cmp9 = fcmp une float %val9, undef
  br i1 %cmp9, label %10, label %9

  %val10 = load volatile float* undef
  %cmp10 = fcmp une float %val10, undef
  br i1 %cmp10, label %11, label %10

  %val11 = load volatile float* undef
  %cmp11 = fcmp une float %val11, undef
  br i1 %cmp11, label %12, label %11

  %val12 = load volatile float* undef
  %cmp12 = fcmp une float %val12, undef
  br i1 %cmp12, label %13, label %12

  %val13 = load volatile float* undef
  %cmp13 = fcmp une float %val13, undef
  br i1 %cmp13, label %14, label %13

  %val14 = load volatile float* undef
  %cmp14 = fcmp une float %val14, undef
  br i1 %cmp14, label %15, label %14

  %val15 = load volatile float* undef
  %cmp15 = fcmp une float %val15, undef
  br i1 %cmp15, label %16, label %15

  %val16 = load volatile float* undef
  %cmp16 = fcmp une float %val16, undef
  br i1 %cmp16, label %17, label %16

  %val17 = load volatile float* undef
  %cmp17 = fcmp une float %val17, undef
  br i1 %cmp17, label %18, label %17

  %val18 = load volatile float* undef
  %cmp18 = fcmp une float %val18, undef
  br i1 %cmp18, label %19, label %18

  %val19 = load volatile float* undef
  %cmp19 = fcmp une float %val19, undef
  br i1 %cmp19, label %20, label %19

  %val20 = load volatile float* undef
  %cmp20 = fcmp une float %val20, undef
  br i1 %cmp20, label %21, label %20

  %val21 = load volatile float* undef
  %cmp21 = fcmp une float %val21, undef
  br i1 %cmp21, label %22, label %21

  %val22 = load volatile float* undef
  %cmp22 = fcmp une float %val22, undef
  br i1 %cmp22, label %23, label %22

  %val23 = load volatile float* undef
  %cmp23 = fcmp une float %val23, undef
  br i1 %cmp23, label %24, label %23

  %val24 = load volatile float* undef
  %cmp24 = fcmp une float %val24, undef
  br i1 %cmp24, label %25, label %24

  %val25 = load volatile float* undef
  %cmp25 = fcmp une float %val25, undef
  br i1 %cmp25, label %26, label %25

  %val26 = load volatile float* undef
  %cmp26 = fcmp une float %val26, undef
  br i1 %cmp26, label %27, label %26

  %val27 = load volatile float* undef
  %cmp27 = fcmp une float %val27, undef
  br i1 %cmp27, label %28, label %27

  %val28 = load volatile float* undef
  %cmp28 = fcmp une float %val28, undef
  br i1 %cmp28, label %29, label %28

  %val29 = load volatile float* undef
  %cmp29 = fcmp une float %val29, undef
  br i1 %cmp29, label %30, label %29

  %val30 = load volatile float* undef
  %cmp30 = fcmp une float %val30, undef
  br i1 %cmp30, label %31, label %30

  %val31 = load volatile float* undef
  %cmp31 = fcmp une float %val31, undef
  br i1 %cmp31, label %32, label %31

  %val32 = load volatile float* undef
  %cmp32 = fcmp une float %val32, undef
  br i1 %cmp32, label %33, label %32

  %val33 = load volatile float* undef
  %cmp33 = fcmp une float %val33, undef
  br i1 %cmp33, label %34, label %33

  %val34 = load volatile float* undef
  %cmp34 = fcmp une float %val34, undef
  br i1 %cmp34, label %35, label %34

  %val35 = load volatile float* undef
  %cmp35 = fcmp une float %val35, undef
  br i1 %cmp35, label %36, label %35

  %val36 = load volatile float* undef
  %cmp36 = fcmp une float %val36, undef
  br i1 %cmp36, label %37, label %36

  %val37 = load volatile float* undef
  %cmp37 = fcmp une float %val37, undef
  br i1 %cmp37, label %38, label %37

  %val38 = load volatile float* undef
  %cmp38 = fcmp une float %val38, undef
  br i1 %cmp38, label %39, label %38

  %val39 = load volatile float* undef
  %cmp39 = fcmp une float %val39, undef
  br i1 %cmp39, label %40, label %39

  %val40 = load volatile float* undef
  %cmp40 = fcmp une float %val40, undef
  br i1 %cmp40, label %41, label %40

  %val41 = load volatile float* undef
  %cmp41 = fcmp une float %val41, undef
  br i1 %cmp41, label %42, label %41

  %val42 = load volatile float* undef
  %cmp42 = fcmp une float %val42, undef
  br i1 %cmp42, label %43, label %42

  %val43 = load volatile float* undef
  %cmp43 = fcmp une float %val43, undef
  br i1 %cmp43, label %44, label %43

  %val44 = load volatile float* undef
  %cmp44 = fcmp une float %val44, undef
  br i1 %cmp44, label %45, label %44

  %val45 = load volatile float* undef
  %cmp45 = fcmp une float %val45, undef
  br i1 %cmp45, label %46, label %45

  %val46 = load volatile float* undef
  %cmp46 = fcmp une float %val46, undef
  br i1 %cmp46, label %47, label %46

  %val47 = load volatile float* undef
  %cmp47 = fcmp une float %val47, undef
  br i1 %cmp47, label %48, label %47

  %val48 = load volatile float* undef
  %cmp48 = fcmp une float %val48, undef
  br i1 %cmp48, label %49, label %48

  %val49 = load volatile float* undef
  %cmp49 = fcmp une float %val49, undef
  br i1 %cmp49, label %50, label %49

  %val50 = load volatile float* undef
  %cmp50 = fcmp une float %val50, undef
  br i1 %cmp50, label %51, label %50

  %val51 = load volatile float* undef
  %cmp51 = fcmp une float %val51, undef
  br i1 %cmp51, label %52, label %51

  %val52 = load volatile float* undef
  %cmp52 = fcmp une float %val52, undef
  br i1 %cmp52, label %53, label %52

  %val53 = load volatile float* undef
  %cmp53 = fcmp une float %val53, undef
  br i1 %cmp53, label %54, label %53

  %val54 = load volatile float* undef
  %cmp54 = fcmp une float %val54, undef
  br i1 %cmp54, label %55, label %54

  %val55 = load volatile float* undef
  %cmp55 = fcmp une float %val55, undef
  br i1 %cmp55, label %56, label %55

  %val56 = load volatile float* undef
  %cmp56 = fcmp une float %val56, undef
  br i1 %cmp56, label %57, label %56

  %val57 = load volatile float* undef
  %cmp57 = fcmp une float %val57, undef
  br i1 %cmp57, label %58, label %57

  %val58 = load volatile float* undef
  %cmp58 = fcmp une float %val58, undef
  br i1 %cmp58, label %59, label %58

  %val59 = load volatile float* undef
  %cmp59 = fcmp une float %val59, undef
  br i1 %cmp59, label %60, label %59

  %val60 = load volatile float* undef
  %cmp60 = fcmp une float %val60, undef
  br i1 %cmp60, label %61, label %60

  %val61 = load volatile float* undef
  %cmp61 = fcmp une float %val61, undef
  br i1 %cmp61, label %62, label %61

  %val62 = load volatile float* undef
  %cmp62 = fcmp une float %val62, undef
  br i1 %cmp62, label %63, label %62

  %val63 = load volatile float* undef
  %cmp63 = fcmp une float %val63, undef
  br i1 %cmp63, label %64, label %63

  %val64 = load volatile float* undef
  %cmp64 = fcmp une float %val64, undef
  br i1 %cmp64, label %65, label %64

define void @benchmark_heapsort(i32 %n, double* nocapture %ra) {
; This test case comes from the heapsort benchmark, and exemplifies several
; important aspects of block placement in the presence of loops:
; 1) Loop rotation needs to *ensure* that the desired exiting edge can be
;    a fallthrough.
; 2) The exiting edge from the loop which is rotated to be laid out at the
;    bottom of the loop needs to be exiting into the nearest enclosing loop (to
;    which there is an exit). Otherwise, we force that enclosing loop into
;    strange layouts that are significantly less efficient, oftentimes making
;    the code much slower.
; CHECK: @benchmark_heapsort
; First rotated loop top.
; Second rotated loop top.
; CHECK: %while.cond.outer
; Third rotated loop top.
; CHECK: %land.lhs.true

  %shr = ashr i32 %n, 1
  %add = add nsw i32 %shr, 1
  %arrayidx3 = getelementptr inbounds double* %ra, i64 1

  %ir.0 = phi i32 [ %n, %entry ], [ %ir.1, %while.end ]
  %l.0 = phi i32 [ %add, %entry ], [ %l.1, %while.end ]
  %cmp = icmp sgt i32 %l.0, 1
  br i1 %cmp, label %if.then, label %if.else

  %dec = add nsw i32 %l.0, -1
  %idxprom = sext i32 %dec to i64
  %arrayidx = getelementptr inbounds double* %ra, i64 %idxprom
  %0 = load double* %arrayidx, align 8

  %idxprom1 = sext i32 %ir.0 to i64
  %arrayidx2 = getelementptr inbounds double* %ra, i64 %idxprom1
  %1 = load double* %arrayidx2, align 8
  %2 = load double* %arrayidx3, align 8
  store double %2, double* %arrayidx2, align 8
  %dec6 = add nsw i32 %ir.0, -1
  %cmp7 = icmp eq i32 %dec6, 1
  br i1 %cmp7, label %if.then8, label %if.end10

  store double %1, double* %arrayidx3, align 8

  %ir.1 = phi i32 [ %ir.0, %if.then ], [ %dec6, %if.else ]
  %l.1 = phi i32 [ %dec, %if.then ], [ %l.0, %if.else ]
  %rra.0 = phi double [ %0, %if.then ], [ %1, %if.else ]
  %add31 = add nsw i32 %ir.1, 1
  br label %while.cond.outer

  %j.0.ph.in = phi i32 [ %l.1, %if.end10 ], [ %j.1, %if.then24 ]
  %j.0.ph = shl i32 %j.0.ph.in, 1
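  ; The shl by 1 computes the heap child index 2*j for the sift-down step.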
  br label %while.cond

  %j.0 = phi i32 [ %add31, %if.end20 ], [ %j.0.ph, %while.cond.outer ]
  %cmp11 = icmp sgt i32 %j.0, %ir.1
  br i1 %cmp11, label %while.end, label %while.body

  %cmp12 = icmp slt i32 %j.0, %ir.1
  br i1 %cmp12, label %land.lhs.true, label %if.end20

  %idxprom13 = sext i32 %j.0 to i64
  %arrayidx14 = getelementptr inbounds double* %ra, i64 %idxprom13
  %3 = load double* %arrayidx14, align 8
  %add15 = add nsw i32 %j.0, 1
  %idxprom16 = sext i32 %add15 to i64
  %arrayidx17 = getelementptr inbounds double* %ra, i64 %idxprom16
  %4 = load double* %arrayidx17, align 8
  %cmp18 = fcmp olt double %3, %4
  br i1 %cmp18, label %if.then19, label %if.end20

  %j.1 = phi i32 [ %add15, %if.then19 ], [ %j.0, %land.lhs.true ], [ %j.0, %while.body ]
  %idxprom21 = sext i32 %j.1 to i64
  %arrayidx22 = getelementptr inbounds double* %ra, i64 %idxprom21
  %5 = load double* %arrayidx22, align 8
  %cmp23 = fcmp olt double %rra.0, %5
  br i1 %cmp23, label %if.then24, label %while.cond

  %idxprom27 = sext i32 %j.0.ph.in to i64
  %arrayidx28 = getelementptr inbounds double* %ra, i64 %idxprom27
  store double %5, double* %arrayidx28, align 8
  br label %while.cond.outer

  %idxprom33 = sext i32 %j.0.ph.in to i64
  %arrayidx34 = getelementptr inbounds double* %ra, i64 %idxprom33
  store double %rra.0, double* %arrayidx34, align 8

declare void @cold_function() cold

define i32 @test_cold_calls(i32* %a) {
; Test that edges to blocks post-dominated by cold calls are
; marked as not expected to be taken. They should be laid out
; as cold code.
; CHECK-LABEL: test_cold_calls:

  %gep1 = getelementptr i32* %a, i32 1
  %val1 = load i32* %gep1
  %cond1 = icmp ugt i32 %val1, 1
  br i1 %cond1, label %then, label %else

  call void @cold_function()

  %gep2 = getelementptr i32* %a, i32 2
  %val2 = load i32* %gep2

  %ret = phi i32 [ %val1, %then ], [ %val2, %else ]