1 ; RUN: llc < %s -march=x86-64 -asm-verbose=false -mtriple=x86_64-unknown-linux-gnu -post-RA-scheduler=true | FileCheck %s
3 ; Currently, floating-point selects are lowered to CFG triangles.
4 ; This means that one side of the select is always unconditionally
5 ; evaluated, however with MachineSink we can sink the other side so
6 ; that it's conditionally evaluated.
10 ; CHECK-NEXT: testb $1, %dil
14 define double @foo(double %x, double %y, i1 %c) nounwind {
; Two independent divides feed a select on %c. Since FP selects are
; lowered to a CFG triangle (per the comment at the top of this test),
; MachineSink can sink each fdiv into the branch arm that uses it, so
; only one divide executes per call.
; NOTE(review): the function's ret and closing brace fall in lines
; elided from this excerpt — do not edit structurally.
15 %a = fdiv double %x, 3.2
16 %b = fdiv double %y, 3.3
17 %z = select i1 %c, double %a, double %b
21 ; Hoist floating-point constant-pool loads out of loops.
26 define void @bar(double* nocapture %p, i64 %n) nounwind {
; Entry guard: only enter the loop when n > 0.
28 %0 = icmp sgt i64 %n, 0
29 br i1 %0, label %bb, label %return
; Loop body (its %bb label is in lines elided from this excerpt):
;   for (i = 0; i != n; ++i) p[i] = 3.2 / p[i];
; The constant-pool load of 3.2 is loop-invariant; the test expects
; codegen to hoist it out of the loop rather than reload each iteration.
32 %i.03 = phi i64 [ 0, %entry ], [ %3, %bb ]
33 %scevgep = getelementptr double* %p, i64 %i.03
34 %1 = load double* %scevgep, align 8
35 %2 = fdiv double 3.200000e+00, %1
36 store double %2, double* %scevgep, align 8
37 %3 = add nsw i64 %i.03, 1
38 %exitcond = icmp eq i64 %3, %n
; Exit test: the induction variable counts up to n exactly once
; (guarded above, so the first iteration always runs).
39 br i1 %exitcond, label %return, label %bb
45 ; Sink instructions with dead EFLAGS defs.
51 define zeroext i8 @zzz(i8 zeroext %a, i8 zeroext %b) nounwind readnone {
; Forces the top bit of %b based on %a: result is (b & 0x7f) when
; a == 0, else (b | 0x80). The or/and below define EFLAGS that nothing
; reads; such dead flag defs must not block sinking (per the comment
; above this function).
53 %tmp = zext i8 %a to i32 ; <i32> [#uses=1]
; NOTE(review): %tmp's single use is in lines elided from this excerpt.
54 %tmp2 = icmp eq i8 %a, 0 ; <i1> [#uses=1]
55 %tmp3 = or i8 %b, -128 ; <i8> [#uses=1]
56 %tmp4 = and i8 %b, 127 ; <i8> [#uses=1]
57 %b_addr.0 = select i1 %tmp2, i8 %tmp4, i8 %tmp3 ; <i8> [#uses=1]
61 ; Codegen should hoist and CSE these constants.
64 ; CHECK: LCPI3_0(%rip), %xmm0
65 ; CHECK: LCPI3_1(%rip), %xmm1
66 ; CHECK: LCPI3_2(%rip), %xmm2
71 @_minusZero.6007 = internal constant <4 x float> <float -0.000000e+00, float -0.000000e+00, float -0.000000e+00, float -0.000000e+00> ; <<4 x float>*> [#uses=0]
72 @twoTo23.6008 = internal constant <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06> ; <<4 x float>*> [#uses=0]
74 define void @vv(float* %y, float* %x, i32* %n) nounwind ssp {
; Vectorized loop: processes 4 floats per iteration, rounding x[i..i+3]
; to integral values (appears to be a floor-style rounding via the
; 2^23 add/sub trick — TODO confirm against the scalar reference) and
; storing the results to y. The three splat constants below
; (0x7FFFFFFF, 0x80000000, 0x4B000000) should be hoisted out of the
; loop and CSE'd into xmm registers, per the CHECK lines above.
; NOTE(review): the entry block, the %bb label, and the back-branch at
; the bottom of the loop body are in lines elided from this excerpt.
79 %0 = bitcast float* %x_addr.0 to <4 x float>* ; <<4 x float>*> [#uses=1]
80 %1 = load <4 x float>* %0, align 16 ; <<4 x float>> [#uses=4]
81 %tmp20 = bitcast <4 x float> %1 to <4 x i32> ; <<4 x i32>> [#uses=1]
; |x|: clear the sign bits (mask 0x7FFFFFFF).
82 %tmp22 = and <4 x i32> %tmp20, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647> ; <<4 x i32>> [#uses=1]
83 %tmp23 = bitcast <4 x i32> %tmp22 to <4 x float> ; <<4 x float>> [#uses=1]
; sign(x): isolate the sign bits (mask 0x80000000); reused twice below.
84 %tmp25 = bitcast <4 x float> %1 to <4 x i32> ; <<4 x i32>> [#uses=1]
85 %tmp27 = and <4 x i32> %tmp25, <i32 -2147483648, i32 -2147483648, i32 -2147483648, i32 -2147483648> ; <<4 x i32>> [#uses=2]
; cmpps predicate 5 = not-less-than: mask of lanes where |x| >= 2^23,
; i.e. values already integral that the add/sub trick must not touch.
86 %tmp30 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %tmp23, <4 x float> <float 8.388608e+06, float 8.388608e+06, float 8.388608e+06, float 8.388608e+06>, i8 5) ; <<4 x float>> [#uses=1]
87 %tmp34 = bitcast <4 x float> %tmp30 to <4 x i32> ; <<4 x i32>> [#uses=1]
; Invert the mask, then select the 2^23 bias (0x4B000000 = 1258291200,
; the bit pattern of 8388608.0f) only in lanes where |x| < 2^23 ...
88 %tmp36 = xor <4 x i32> %tmp34, <i32 -1, i32 -1, i32 -1, i32 -1> ; <<4 x i32>> [#uses=1]
89 %tmp37 = and <4 x i32> %tmp36, <i32 1258291200, i32 1258291200, i32 1258291200, i32 1258291200> ; <<4 x i32>> [#uses=1]
; ... and give the bias the same sign as x, so the add/sub below rounds
; x to an integral value without overflow in either direction.
90 %tmp42 = or <4 x i32> %tmp37, %tmp27 ; <<4 x i32>> [#uses=1]
91 %tmp43 = bitcast <4 x i32> %tmp42 to <4 x float> ; <<4 x float>> [#uses=2]
; (x + ±2^23) - ±2^23: rounds the low lanes to integral values.
92 %tmp45 = fadd <4 x float> %1, %tmp43 ; <<4 x float>> [#uses=1]
93 %tmp47 = fsub <4 x float> %tmp45, %tmp43 ; <<4 x float>> [#uses=2]
; cmpps predicate 1 = less-than: lanes where x < rounded (rounded up);
; cvtdq2ps turns that all-ones mask into -1.0f, and the fadd corrects
; those lanes downward by one.
94 %tmp49 = call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %1, <4 x float> %tmp47, i8 1) ; <<4 x float>> [#uses=1]
95 %2 = bitcast <4 x float> %tmp49 to <4 x i32> ; <<4 x i32>> [#uses=1]
96 %3 = call <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32> %2) nounwind readnone ; <<4 x float>> [#uses=1]
97 %tmp53 = fadd <4 x float> %tmp47, %3 ; <<4 x float>> [#uses=1]
; Re-apply the original sign bits (preserves -0.0 for negative inputs).
98 %tmp55 = bitcast <4 x float> %tmp53 to <4 x i32> ; <<4 x i32>> [#uses=1]
99 %tmp57 = or <4 x i32> %tmp55, %tmp27 ; <<4 x i32>> [#uses=1]
100 %tmp58 = bitcast <4 x i32> %tmp57 to <4 x float> ; <<4 x float>> [#uses=1]
101 %4 = bitcast float* %y_addr.0 to <4 x float>* ; <<4 x float>*> [#uses=1]
102 store <4 x float> %tmp58, <4 x float>* %4, align 16
; Advance both pointers and the counter by one vector (4 floats).
103 %5 = getelementptr float* %x_addr.0, i64 4 ; <float*> [#uses=1]
104 %6 = getelementptr float* %y_addr.0, i64 4 ; <float*> [#uses=1]
105 %7 = add i32 %i.0, 4 ; <i32> [#uses=1]
; Loop header: merges initial values from %entry with the advanced
; pointers/counter from %bb; *%n is reloaded every trip for the bound.
108 bb60: ; preds = %bb, %entry
109 %i.0 = phi i32 [ 0, %entry ], [ %7, %bb ] ; <i32> [#uses=2]
110 %x_addr.0 = phi float* [ %x, %entry ], [ %5, %bb ] ; <float*> [#uses=2]
111 %y_addr.0 = phi float* [ %y, %entry ], [ %6, %bb ] ; <float*> [#uses=2]
112 %8 = load i32* %n, align 4 ; <i32> [#uses=1]
113 %9 = icmp sgt i32 %8, %i.0 ; <i1> [#uses=1]
114 br i1 %9, label %bb, label %return
116 return: ; preds = %bb60
120 declare <4 x float> @llvm.x86.sse.cmp.ps(<4 x float>, <4 x float>, i8) nounwind readnone
122 declare <4 x float> @llvm.x86.sse2.cvtdq2ps(<4 x i32>) nounwind readnone
124 ; CodeGen should use the correct register class when extracting
125 ; a load from a zero-extending load for hoisting.
127 ; CHECK: default_get_pch_validity:
128 ; CHECK: movl cl_options_count(%rip), %ecx
130 @cl_options_count = external constant i32 ; <i32*> [#uses=2]
132 define void @default_get_pch_validity() nounwind {
; Counts from 1 while the counter stays below @cl_options_count,
; reloading the global every iteration. The test (per the comment and
; CHECK lines above) verifies codegen uses the correct register class
; when extracting the load from the zero-extending load for hoisting.
; NOTE(review): the entry label, the body of bb6, and the closing brace
; are in lines elided from this excerpt.
134 %tmp4 = load i32* @cl_options_count, align 4 ; <i32> [#uses=1]
135 %tmp5 = icmp eq i32 %tmp4, 0 ; <i1> [#uses=1]
; Skip the loop entirely when the count is zero.
136 br i1 %tmp5, label %bb6, label %bb2
138 bb2: ; preds = %bb2, %entry
139 %i.019 = phi i64 [ 0, %entry ], [ %tmp25, %bb2 ] ; <i64> [#uses=1]
140 %tmp25 = add i64 %i.019, 1 ; <i64> [#uses=2]
; Reload + zext each trip; the bound compare is unsigned (ugt).
141 %tmp11 = load i32* @cl_options_count, align 4 ; <i32> [#uses=1]
142 %tmp12 = zext i32 %tmp11 to i64 ; <i64> [#uses=1]
143 %tmp13 = icmp ugt i64 %tmp12, %tmp25 ; <i1> [#uses=1]
144 br i1 %tmp13, label %bb2, label %bb6
146 bb6: ; preds = %bb2, %entry