From 25e8e79fab57f4a95165f555ae67d0839d9a6399 Mon Sep 17 00:00:00 2001
From: David Majnemer
Date: Fri, 2 Jan 2015 07:29:43 +0000
Subject: [PATCH] Analysis: Reformulate WillNotOverflowUnsignedMul for reusability

WillNotOverflowUnsignedMul's smarts will live in ValueTracking as
computeOverflowForUnsignedMul.

It now returns a tri-state result: never overflows, always overflows
and sometimes overflows.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@225076 91177308-0d34-0410-b5e6-96231b3b80d8
---
 include/llvm/Analysis/ValueTracking.h        |  6 +++
 lib/Analysis/ValueTracking.cpp               | 39 +++++++++++++++++++
 lib/Transforms/InstCombine/InstCombine.h     |  5 ++-
 .../InstCombine/InstCombineCalls.cpp         | 20 +---------
 .../InstCombine/InstCombineMulDivRem.cpp     | 37 ++----------------
 5 files changed, 54 insertions(+), 53 deletions(-)

diff --git a/include/llvm/Analysis/ValueTracking.h b/include/llvm/Analysis/ValueTracking.h
index 6bbf4f46d98..7c139cd06c8 100644
--- a/include/llvm/Analysis/ValueTracking.h
+++ b/include/llvm/Analysis/ValueTracking.h
@@ -217,6 +217,12 @@ namespace llvm {
                                const DataLayout *DL = nullptr,
                                const DominatorTree *DT = nullptr);
 
+  enum class OverflowResult { AlwaysOverflows, MayOverflow, NeverOverflows };
+  OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+                                               const DataLayout *DL,
+                                               AssumptionTracker *AT,
+                                               const Instruction *CxtI,
+                                               const DominatorTree *DT);
 } // end namespace llvm
 
 #endif
diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp
index 70ee35424ea..cb1e285e8f3 100644
--- a/lib/Analysis/ValueTracking.cpp
+++ b/lib/Analysis/ValueTracking.cpp
@@ -2672,3 +2672,42 @@ bool llvm::isKnownNonNull(const Value *V, const TargetLibraryInfo *TLI) {
 
   return false;
 }
+
+OverflowResult llvm::computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+                                                   const DataLayout *DL,
+                                                   AssumptionTracker *AT,
+                                                   const Instruction *CxtI,
+                                                   const DominatorTree *DT) {
+  // Multiplying n * m significant bits yields a result of n + m significant
+  // bits. If the total number of significant bits does not exceed the
+  // result bit width (minus 1), there is no overflow.
+  // This means if we have enough leading zero bits in the operands
+  // we can guarantee that the result does not overflow.
+  // Ref: "Hacker's Delight" by Henry Warren
+  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
+  APInt LHSKnownZero(BitWidth, 0);
+  APInt RHSKnownZero(BitWidth, 0);
+  APInt TmpKnownOne(BitWidth, 0);
+  computeKnownBits(LHS, LHSKnownZero, TmpKnownOne, DL, /*Depth=*/0, AT, CxtI, DT);
+  computeKnownBits(RHS, RHSKnownZero, TmpKnownOne, DL, /*Depth=*/0, AT, CxtI, DT);
+  // Note that underestimating the number of zero bits gives a more
+  // conservative answer.
+  unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
+                      RHSKnownZero.countLeadingOnes();
+  // First handle the easy case: if we have enough zero bits there's
+  // definitely no overflow.
+  if (ZeroBits >= BitWidth)
+    return OverflowResult::NeverOverflows;
+
+  // Get the largest possible values for each operand.
+  APInt LHSMax = ~LHSKnownZero;
+  APInt RHSMax = ~RHSKnownZero;
+
+  // We know the multiply operation doesn't overflow if the maximum values for
+  // each operand will not overflow after we multiply them together.
+  bool Overflow;
+  LHSMax.umul_ov(RHSMax, Overflow);
+
+  return Overflow ? OverflowResult::MayOverflow
+                  : OverflowResult::NeverOverflows;
+}
diff --git a/lib/Transforms/InstCombine/InstCombine.h b/lib/Transforms/InstCombine/InstCombine.h
index b4d1efc1a92..96edc793175 100644
--- a/lib/Transforms/InstCombine/InstCombine.h
+++ b/lib/Transforms/InstCombine/InstCombine.h
@@ -286,7 +286,6 @@ private:
   bool WillNotOverflowSignedSub(Value *LHS, Value *RHS, Instruction *CxtI);
   bool WillNotOverflowUnsignedSub(Value *LHS, Value *RHS, Instruction *CxtI);
   bool WillNotOverflowSignedMul(Value *LHS, Value *RHS, Instruction *CxtI);
-  bool WillNotOverflowUnsignedMul(Value *LHS, Value *RHS, Instruction *CxtI);
   Value *EmitGEPOffset(User *GEP);
   Instruction *scalarizePHI(ExtractElementInst &EI, PHINode *PN);
   Value *EvaluateInDifferentElementOrder(Value *V, ArrayRef<int> Mask);
@@ -388,6 +387,10 @@ public:
     return llvm::ComputeSignBit(V, KnownZero, KnownOne, DL, Depth, AT, CxtI,
                                 DT);
   }
+  OverflowResult computeOverflowForUnsignedMul(Value *LHS, Value *RHS,
+                                               const Instruction *CxtI) {
+    return llvm::computeOverflowForUnsignedMul(LHS, RHS, DL, AT, CxtI, DT);
+  }
 
 private:
   /// SimplifyAssociativeOrCommutative - This performs a few simplifications for
diff --git a/lib/Transforms/InstCombine/InstCombineCalls.cpp b/lib/Transforms/InstCombine/InstCombineCalls.cpp
index ec6a61307e1..34caf1a5ab9 100644
--- a/lib/Transforms/InstCombine/InstCombineCalls.cpp
+++ b/lib/Transforms/InstCombine/InstCombineCalls.cpp
@@ -440,24 +440,8 @@ Instruction *InstCombiner::visitCallInst(CallInst &CI) {
   }
   case Intrinsic::umul_with_overflow: {
     Value *LHS = II->getArgOperand(0), *RHS = II->getArgOperand(1);
-    unsigned BitWidth = cast<IntegerType>(LHS->getType())->getBitWidth();
-
-    APInt LHSKnownZero(BitWidth, 0);
-    APInt LHSKnownOne(BitWidth, 0);
-    computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, II);
-    APInt RHSKnownZero(BitWidth, 0);
-    APInt RHSKnownOne(BitWidth, 0);
-    computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, II);
-
-    // Get the largest possible values for each operand.
-    APInt LHSMax = ~LHSKnownZero;
-    APInt RHSMax = ~RHSKnownZero;
-
-    // If multiplying the maximum values does not overflow then we can turn
-    // this into a plain NUW mul.
-    bool Overflow;
-    LHSMax.umul_ov(RHSMax, Overflow);
-    if (!Overflow) {
+    OverflowResult OR = computeOverflowForUnsignedMul(LHS, RHS, II);
+    if (OR == OverflowResult::NeverOverflows) {
       return CreateOverflowTuple(II, Builder->CreateNUWMul(LHS, RHS), false);
     }
   } // FALL THROUGH
diff --git a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
index d444d33ca8a..255e5872583 100644
--- a/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
+++ b/lib/Transforms/InstCombine/InstCombineMulDivRem.cpp
@@ -165,39 +165,6 @@ bool InstCombiner::WillNotOverflowSignedMul(Value *LHS, Value *RHS,
   return false;
 }
 
-/// \brief Return true if we can prove that:
-///    (mul LHS, RHS) === (mul nuw LHS, RHS)
-bool InstCombiner::WillNotOverflowUnsignedMul(Value *LHS, Value *RHS,
-                                              Instruction *CxtI) {
-  // Multiplying n * m significant bits yields a result of n + m significant
-  // bits. If the total number of significant bits does not exceed the
-  // result bit width (minus 1), there is no overflow.
-  // This means if we have enough leading zero bits in the operands
-  // we can guarantee that the result does not overflow.
-  // Ref: "Hacker's Delight" by Henry Warren
-  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
-  APInt LHSKnownZero(BitWidth, 0);
-  APInt RHSKnownZero(BitWidth, 0);
-  APInt TmpKnownOne(BitWidth, 0);
-  computeKnownBits(LHS, LHSKnownZero, TmpKnownOne, 0, CxtI);
-  computeKnownBits(RHS, RHSKnownZero, TmpKnownOne, 0, CxtI);
-  // Note that underestimating the number of zero bits gives a more
-  // conservative answer.
-  unsigned ZeroBits = LHSKnownZero.countLeadingOnes() +
-                      RHSKnownZero.countLeadingOnes();
-  // First handle the easy case: if we have enough zero bits there's
-  // definitely no overflow.
-  if (ZeroBits >= BitWidth)
-    return true;
-
-  // There is an ambiguous cases where there can be no overflow:
-  //   ZeroBits == BitWidth - 1
-  // However, determining overflow requires calculating the sign bit of
-  // LHS * RHS/2.
-
-  return false;
-}
-
 Instruction *InstCombiner::visitMul(BinaryOperator &I) {
   bool Changed = SimplifyAssociativeOrCommutative(I);
   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
@@ -413,7 +380,9 @@ Instruction *InstCombiner::visitMul(BinaryOperator &I) {
     I.setHasNoSignedWrap(true);
   }
 
-  if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedMul(Op0, Op1, &I)) {
+  if (!I.hasNoUnsignedWrap() &&
+      computeOverflowForUnsignedMul(Op0, Op1, &I) ==
+          OverflowResult::NeverOverflows) {
     Changed = true;
     I.setHasNoUnsignedWrap(true);
   }
-- 
2.34.1
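
Editor's note (not part of the patch): both the deleted InstCombine helper and the new
computeOverflowForUnsignedMul rest on the same "Hacker's Delight" observation quoted in
the comments above: an n-bit value times an m-bit value needs at most n + m bits, so if
the operands' leading zero bits together cover the whole bit width, the unsigned product
cannot wrap. The following is a minimal standalone C++ sketch of that argument using
plain 64-bit integers and __builtin_clzll (GCC/Clang) instead of LLVM's known-bits
machinery; the helper name noUnsignedMulOverflow and the values in main are made up for
illustration only.

// Standalone sketch of the leading-zeros overflow check (assumptions noted above).
#include <cassert>
#include <cstdint>

// Returns true when an unsigned 64-bit multiply provably cannot overflow,
// judging only by the number of leading zero bits in each operand.
static bool noUnsignedMulOverflow(uint64_t LHS, uint64_t RHS) {
  const unsigned BitWidth = 64;
  // __builtin_clzll is undefined for 0, so treat 0 as "all bits are zero".
  unsigned LHSZeros = LHS ? __builtin_clzll(LHS) : BitWidth;
  unsigned RHSZeros = RHS ? __builtin_clzll(RHS) : BitWidth;
  // n significant bits times m significant bits needs at most n + m bits,
  // so enough combined leading zeros guarantees the product fits.
  return LHSZeros + RHSZeros >= BitWidth;
}

int main() {
  // 2^20 * 2^20 = 2^40 clearly fits in 64 bits, and the check proves it
  // (analogous to OverflowResult::NeverOverflows).
  assert(noUnsignedMulOverflow(1ULL << 20, 1ULL << 20));
  // 2^40 * 2^40 would need about 80 bits; the conservative check cannot
  // prove safety here (analogous to OverflowResult::MayOverflow).
  assert(!noUnsignedMulOverflow(1ULL << 40, 1ULL << 40));
  return 0;
}

The check is deliberately one-sided: returning false only means overflow could not be
ruled out, which is why the patch distinguishes MayOverflow from AlwaysOverflows.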