This patch disables unrolling in the loop vectorization pass when VF==1 on the x86
architecture, by setting MaxInterleaveFactor to 1. Unrolling in the loop vectorizer may
introduce the cost of an overflow check, a memory bound check, and extra prologue/epilogue
code, and the regular unroller may then unroll the loop a second time. Disabling it when
VF==1 removes this unnecessary cost on x86. The same can be done for other platforms once
interleaving and the memory bound checks are verified not to be performance critical there.
Differential Revision: http://reviews.llvm.org/D9515
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@236613 91177308-0d34-0410-b5e6-96231b3b80d8
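
To see where the cost comes from, consider the conceptual shape of a loop the vectorizer
interleaves at VF==1 with an interleave count of 2. The C++ sketch below is illustrative
only (foo_interleaved and its exact structure are assumptions, not code produced by this
patch); it shows the runtime guard and scalar remainder the vectorizer must emit, which the
regular unroller would then unroll yet again:

  // Hypothetical rendering of interleaved output (VF==1, IC==2); overflow
  // and memory checks are elided for brevity.
  void foo_interleaved(int *A, long N) {
    long i = 0;
    if (N >= 2) {                        // minimum-iteration check added by the vectorizer
      long InterleavedEnd = N - (N % 2);
      for (; i < InterleavedEnd; i += 2) {
        A[i] = 3;                        // interleaved copy 0
        A[i + 1] = 3;                    // interleaved copy 1
      }
    }
    for (; i < N; ++i)                   // scalar epilogue; the regular unroller
      A[i] = 3;                          // may unroll this loop once more
  }

With this change, VF==1 loops skip that transformation entirely, and the single unrolling
decision is left to the regular unroller.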
/// \return The maximum interleave factor that any transform should try to
/// perform for this target. This number depends on the level of parallelism
/// and the number of execution units in the CPU.
- unsigned getMaxInterleaveFactor() const;
+ unsigned getMaxInterleaveFactor(unsigned VF) const;
/// \return The expected cost of arithmetic ops, such as mul, xor, fsub, etc.
unsigned
const APInt &Imm, Type *Ty) = 0;
virtual unsigned getNumberOfRegisters(bool Vector) = 0;
virtual unsigned getRegisterBitWidth(bool Vector) = 0;
- virtual unsigned getMaxInterleaveFactor() = 0;
+ virtual unsigned getMaxInterleaveFactor(unsigned VF) = 0;
virtual unsigned
getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
OperandValueKind Opd2Info,
unsigned getRegisterBitWidth(bool Vector) override {
return Impl.getRegisterBitWidth(Vector);
}
- unsigned getMaxInterleaveFactor() override {
- return Impl.getMaxInterleaveFactor();
+ unsigned getMaxInterleaveFactor(unsigned VF) override {
+ return Impl.getMaxInterleaveFactor(VF);
}
unsigned
getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
unsigned getRegisterBitWidth(bool Vector) { return 32; }
- unsigned getMaxInterleaveFactor() { return 1; }
+ unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info,
unsigned getRegisterBitWidth(bool Vector) { return 32; }
- unsigned getMaxInterleaveFactor() { return 1; }
+ unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
unsigned getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
return TTIImpl->getRegisterBitWidth(Vector);
}
-unsigned TargetTransformInfo::getMaxInterleaveFactor() const {
- return TTIImpl->getMaxInterleaveFactor();
+unsigned TargetTransformInfo::getMaxInterleaveFactor(unsigned VF) const {
+ return TTIImpl->getMaxInterleaveFactor(VF);
}
unsigned TargetTransformInfo::getArithmeticInstrCost(
return Cost;
}
-unsigned AArch64TTIImpl::getMaxInterleaveFactor() {
+unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
if (ST->isCortexA57())
return 4;
return 2;
return 64;
}
- unsigned getMaxInterleaveFactor();
+ unsigned getMaxInterleaveFactor(unsigned VF);
unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
return 32;
}
- unsigned getMaxInterleaveFactor() {
+ unsigned getMaxInterleaveFactor(unsigned VF) {
// These are out of order CPUs:
if (ST->isCortexA15() || ST->isSwift())
return 2;
}
-unsigned PPCTTIImpl::getMaxInterleaveFactor() {
+unsigned PPCTTIImpl::getMaxInterleaveFactor(unsigned VF) {
unsigned Directive = ST->getDarwinDirective();
// The 440 has no SIMD support, but floating-point instructions
// have a 5-cycle latency, so unroll by 5x for latency hiding.
bool enableAggressiveInterleaving(bool LoopHasReductions);
unsigned getNumberOfRegisters(bool Vector);
unsigned getRegisterBitWidth(bool Vector);
- unsigned getMaxInterleaveFactor();
+ unsigned getMaxInterleaveFactor(unsigned VF);
unsigned getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
unsigned AMDGPUTTIImpl::getRegisterBitWidth(bool) { return 32; }
-unsigned AMDGPUTTIImpl::getMaxInterleaveFactor() {
+unsigned AMDGPUTTIImpl::getMaxInterleaveFactor(unsigned VF) {
// Semi-arbitrary large amount.
return 64;
}
unsigned getNumberOfRegisters(bool Vector);
unsigned getRegisterBitWidth(bool Vector);
- unsigned getMaxInterleaveFactor();
+ unsigned getMaxInterleaveFactor(unsigned VF);
};
} // end namespace llvm
}
-unsigned X86TTIImpl::getMaxInterleaveFactor() {
+unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
+ // If the loop will not be vectorized, don't interleave the loop.
+ // Let the regular unroller unroll it instead, which saves the cost of
+ // the overflow check and the memory checks.
+ if (VF == 1)
+ return 1;
+
if (ST->isAtom())
return 1;
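
Per the commit message, other backends could adopt the same guard once interleaving and the
memory bound checks are measured to be non-critical for them. A minimal sketch, assuming a
hypothetical SomeTargetTTIImpl (not part of this patch):

  // Hypothetical override: apply the same VF==1 guard ahead of the
  // target's usual interleaving heuristics.
  unsigned SomeTargetTTIImpl::getMaxInterleaveFactor(unsigned VF) {
    if (VF == 1)
      return 1; // leave scalar loops to the regular unroller
    return 2;   // target-specific heuristic would go here
  }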
unsigned getNumberOfRegisters(bool Vector);
unsigned getRegisterBitWidth(bool Vector);
- unsigned getMaxInterleaveFactor();
+ unsigned getMaxInterleaveFactor(unsigned VF);
unsigned getArithmeticInstrCost(
unsigned Opcode, Type *Ty,
TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
std::max(1U, (R.MaxLocalUsers - 1)));
// Clamp the unroll factor ranges to reasonable factors.
- unsigned MaxInterleaveSize = TTI.getMaxInterleaveFactor();
+ unsigned MaxInterleaveSize = TTI.getMaxInterleaveFactor(VF);
// Check if the user has overridden the unroll max.
if (VF == 1) {
; CHECK-VECTOR: store <4 x i32>
; CHECK-VECTOR: ret
;
+; For x86, unrolling in the loop vectorizer is disabled when VF==1.
+;
; CHECK-SCALAR-LABEL: @bar(
; CHECK-SCALAR: store i32
-; CHECK-SCALAR: store i32
+; CHECK-SCALAR-NOT: store i32
; CHECK-SCALAR: ret
define i32 @bar(i32* nocapture %A, i32 %n) nounwind uwtable ssp {
%1 = icmp sgt i32 %n, 0
--- /dev/null
+; This test makes sure that the loop will not be unrolled in vectorization if the
+; computed VF equals 1.
+; RUN: opt < %s -loop-vectorize -S | FileCheck %s
+
+; Make sure there is only a single getelementptr, i.e. the loop body was not duplicated.
+; CHECK-LABEL: @foo(
+; CHECK: getelementptr
+; CHECK-NOT: getelementptr
+
+@N = common global i32 0, align 4
+@a = common global [1000 x i32] zeroinitializer, align 16
+
+define void @foo() #0 {
+entry:
+ %0 = load i32, i32* @N, align 4
+ %cmp5 = icmp sgt i32 %0, 0
+ br i1 %cmp5, label %for.body.lr.ph, label %for.end
+
+for.body.lr.ph: ; preds = %entry
+ %conv = sext i32 %0 to i64
+ br label %for.body
+
+for.body: ; preds = %for.body.lr.ph, %for.body
+ %i.06 = phi i64 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
+ %mul = mul nuw nsw i64 %i.06, 7
+ %arrayidx = getelementptr inbounds [1000 x i32], [1000 x i32]* @a, i64 0, i64 %mul
+ store i32 3, i32* %arrayidx, align 4
+ %inc = add nuw nsw i64 %i.06, 1
+ %cmp = icmp slt i64 %inc, %conv
+ br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit: ; preds = %for.body
+ br label %for.end
+
+for.end: ; preds = %for.end.loopexit, %entry
+ ret void
+}