case Instruction::Store:
ImmIdx = 0;
break;
+ case Instruction::ICmp:
+ // This is an imperfect hack to prevent constant hoisting of
+ // compares that might be trying to check if a 64-bit value fits in
+ // 32 bits. The backend can optimize these cases using a right shift by 32.
+ // Ideally we would check the compare predicate here. There are also other
+ // similar immediates the backend can use shifts for.
+ if (Idx == 1 && Imm.getBitWidth() == 64) {
+ uint64_t ImmVal = Imm.getZExtValue();
+ if (ImmVal == 0x100000000ULL || ImmVal == 0xffffffff)
+ return TTI::TCC_Free;
+ }
+ ImmIdx = 1;
+ break;
case Instruction::And:
// We support 64-bit ANDs with immediates with 32-bits of leading zeroes
// by using a 32-bit operation with implicit zero extension. Detect such
case Instruction::SRem:
case Instruction::Or:
case Instruction::Xor:
- case Instruction::ICmp:
ImmIdx = 1;
break;
// Always return TCC_Free for the shift value of a shift instruction.
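
For reference, the equivalence the backend relies on can be checked in
isolation: for a 64-bit value x, both "x > 0xffffffff" and
"x >= 0x100000000" hold exactly when the upper 32 bits of x are non-zero,
i.e. when (x >> 32) != 0. The standalone C++ sketch below is illustrative
only; it is not part of the patch, and upperBitsSet is just a name chosen
here:

#include <cassert>
#include <cstdint>

// The upper 32 bits of x are non-zero exactly when x > 0xffffffff,
// which is the same condition as x >= 0x100000000.
static bool upperBitsSet(uint64_t x) { return (x >> 32) != 0; }

int main() {
  const uint64_t vals[] = {0, 1, 0xffffffffULL, 0x100000000ULL,
                           0x100000001ULL, UINT64_MAX};
  for (uint64_t x : vals) {
    assert((x > 0xffffffffULL) == upperBitsSet(x));
    assert((x >= 0x100000000ULL) == upperBitsSet(x));
  }
  return 0;
}

Because of this, keeping the immediate next to the compare lets the x86-64
backend test the condition with a single shrq $32 rather than materializing
the 64-bit constant for a cmpq, which is what the CHECK lines in the new
test below expect.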
--- /dev/null
+; RUN: llc < %s -O3 -march=x86-64 | FileCheck %s
+define i64 @foo(i64 %data1, i64 %data2, i64 %data3) {
+; If the constant 4294967295 (0xffffffff) is hoisted into a variable, the
+; backend will not be able to use a right shift by 32 to optimize the compares.
+entry:
+ %val1 = add i64 %data3, 1
+ %x = icmp ugt i64 %data1, 4294967295
+ br i1 %x, label %End, label %L_val2
+
+; CHECK: shrq $32, {{.*}}
+; CHECK: shrq $32, {{.*}}
+L_val2:
+ %val2 = add i64 %data3, 2
+ %y = icmp ugt i64 %data2, 4294967295
+ br i1 %y, label %End, label %L_val3
+
+L_val3:
+ %val3 = add i64 %data3, 3
+ br label %End
+
+End:
+ %p1 = phi i64 [%val1,%entry], [%val2,%L_val2], [%val3,%L_val3]
+ ret i64 %p1
+}