#include "llvm/Operator.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
+#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
"derived loop"),
cl::init(100));
-INITIALIZE_PASS(ScalarEvolution, "scalar-evolution",
- "Scalar Evolution Analysis", false, true);
+INITIALIZE_PASS_BEGIN(ScalarEvolution, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true)
+INITIALIZE_PASS_DEPENDENCY(LoopInfo)
+INITIALIZE_PASS_DEPENDENCY(DominatorTree)
+INITIALIZE_PASS_END(ScalarEvolution, "scalar-evolution",
+ "Scalar Evolution Analysis", false, true)
char ScalarEvolution::ID = 0;
//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//
-SCEV::~SCEV() {}
-
void SCEV::dump() const {
print(dbgs());
dbgs() << '\n';
}
+void SCEV::print(raw_ostream &OS) const {
+ switch (getSCEVType()) {
+ case scConstant:
+ WriteAsOperand(OS, cast<SCEVConstant>(this)->getValue(), false);
+ return;
+ case scTruncate: {
+ const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(this);
+ const SCEV *Op = Trunc->getOperand();
+ OS << "(trunc " << *Op->getType() << " " << *Op << " to "
+ << *Trunc->getType() << ")";
+ return;
+ }
+ case scZeroExtend: {
+ const SCEVZeroExtendExpr *ZExt = cast<SCEVZeroExtendExpr>(this);
+ const SCEV *Op = ZExt->getOperand();
+ OS << "(zext " << *Op->getType() << " " << *Op << " to "
+ << *ZExt->getType() << ")";
+ return;
+ }
+ case scSignExtend: {
+ const SCEVSignExtendExpr *SExt = cast<SCEVSignExtendExpr>(this);
+ const SCEV *Op = SExt->getOperand();
+ OS << "(sext " << *Op->getType() << " " << *Op << " to "
+ << *SExt->getType() << ")";
+ return;
+ }
+ case scAddRecExpr: {
+ const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(this);
+ OS << "{" << *AR->getOperand(0);
+ for (unsigned i = 1, e = AR->getNumOperands(); i != e; ++i)
+ OS << ",+," << *AR->getOperand(i);
+ OS << "}<";
+ if (AR->getNoWrapFlags(FlagNUW))
+ OS << "nuw><";
+ if (AR->getNoWrapFlags(FlagNSW))
+ OS << "nsw><";
+ if (AR->getNoWrapFlags(FlagNW) &&
+ !AR->getNoWrapFlags((NoWrapFlags)(FlagNUW | FlagNSW)))
+ OS << "nw><";
+ WriteAsOperand(OS, AR->getLoop()->getHeader(), /*PrintType=*/false);
+ OS << ">";
+ return;
+ }
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(this);
+ const char *OpStr = 0;
+ switch (NAry->getSCEVType()) {
+ case scAddExpr: OpStr = " + "; break;
+ case scMulExpr: OpStr = " * "; break;
+ case scUMaxExpr: OpStr = " umax "; break;
+ case scSMaxExpr: OpStr = " smax "; break;
+ }
+ OS << "(";
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
+ I != E; ++I) {
+ OS << **I;
+ if (llvm::next(I) != E)
+ OS << OpStr;
+ }
+ OS << ")";
+ return;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(this);
+ OS << "(" << *UDiv->getLHS() << " /u " << *UDiv->getRHS() << ")";
+ return;
+ }
+ case scUnknown: {
+ const SCEVUnknown *U = cast<SCEVUnknown>(this);
+ const Type *AllocTy;
+ if (U->isSizeOf(AllocTy)) {
+ OS << "sizeof(" << *AllocTy << ")";
+ return;
+ }
+ if (U->isAlignOf(AllocTy)) {
+ OS << "alignof(" << *AllocTy << ")";
+ return;
+ }
+
+ const Type *CTy;
+ Constant *FieldNo;
+ if (U->isOffsetOf(CTy, FieldNo)) {
+ OS << "offsetof(" << *CTy << ", ";
+ WriteAsOperand(OS, FieldNo, false);
+ OS << ")";
+ return;
+ }
+
+ // Otherwise just print it normally.
+ WriteAsOperand(OS, U->getValue(), false);
+ return;
+ }
+ case scCouldNotCompute:
+ OS << "***COULDNOTCOMPUTE***";
+ return;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+}
+
+const Type *SCEV::getType() const {
+ switch (getSCEVType()) {
+ case scConstant:
+ return cast<SCEVConstant>(this)->getType();
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ return cast<SCEVCastExpr>(this)->getType();
+ case scAddRecExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr:
+ return cast<SCEVNAryExpr>(this)->getType();
+ case scAddExpr:
+ return cast<SCEVAddExpr>(this)->getType();
+ case scUDivExpr:
+ return cast<SCEVUDivExpr>(this)->getType();
+ case scUnknown:
+ return cast<SCEVUnknown>(this)->getType();
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ return 0;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ return 0;
+}
+
bool SCEV::isZero() const {
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
return SC->getValue()->isZero();
SCEVCouldNotCompute::SCEVCouldNotCompute() :
SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
-bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return false;
-}
-
-const Type *SCEVCouldNotCompute::getType() const {
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return 0;
-}
-
-bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return false;
-}
-
-bool SCEVCouldNotCompute::hasOperand(const SCEV *) const {
- llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
- return false;
-}
-
-void SCEVCouldNotCompute::print(raw_ostream &OS) const {
- OS << "***COULDNOTCOMPUTE***";
-}
-
bool SCEVCouldNotCompute::classof(const SCEV *S) {
return S->getSCEVType() == scCouldNotCompute;
}
return getConstant(ConstantInt::get(ITy, V, isSigned));
}
-const Type *SCEVConstant::getType() const { return V->getType(); }
-
-void SCEVConstant::print(raw_ostream &OS) const {
- WriteAsOperand(OS, V, false);
-}
-
SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty)
: SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
-bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- return Op->dominates(BB, DT);
-}
-
-bool SCEVCastExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- return Op->properlyDominates(BB, DT);
-}
-
SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
"Cannot truncate non-integer value!");
}
-void SCEVTruncateExpr::print(raw_ostream &OS) const {
- OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
-}
-
SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
"Cannot zero extend non-integer value!");
}
-void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
- OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
-}
-
SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
"Cannot sign extend non-integer value!");
}
-void SCEVSignExtendExpr::print(raw_ostream &OS) const {
- OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
-}
-
-void SCEVCommutativeExpr::print(raw_ostream &OS) const {
- const char *OpStr = getOperationStr();
- OS << "(";
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
- OS << **I;
- if (llvm::next(I) != E)
- OS << OpStr;
- }
- OS << ")";
-}
-
-bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
- if (!(*I)->dominates(BB, DT))
- return false;
- return true;
-}
-
-bool SCEVNAryExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
- if (!(*I)->properlyDominates(BB, DT))
- return false;
- return true;
-}
-
-bool SCEVNAryExpr::isLoopInvariant(const Loop *L) const {
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I)
- if (!(*I)->isLoopInvariant(L))
- return false;
- return true;
-}
-
-// hasComputableLoopEvolution - N-ary expressions have computable loop
-// evolutions iff they have at least one operand that varies with the loop,
-// but that all varying operands are computable.
-bool SCEVNAryExpr::hasComputableLoopEvolution(const Loop *L) const {
- bool HasVarying = false;
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
- const SCEV *S = *I;
- if (!S->isLoopInvariant(L)) {
- if (S->hasComputableLoopEvolution(L))
- HasVarying = true;
- else
- return false;
- }
- }
- return HasVarying;
-}
-
-bool SCEVNAryExpr::hasOperand(const SCEV *O) const {
- for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
- const SCEV *S = *I;
- if (O == S || S->hasOperand(O))
- return true;
- }
- return false;
-}
-
-bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
-}
-
-bool SCEVUDivExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- return LHS->properlyDominates(BB, DT) && RHS->properlyDominates(BB, DT);
-}
-
-void SCEVUDivExpr::print(raw_ostream &OS) const {
- OS << "(" << *LHS << " /u " << *RHS << ")";
-}
-
-const Type *SCEVUDivExpr::getType() const {
- // In most cases the types of LHS and RHS will be the same, but in some
- // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
- // depend on the type for correctness, but handling types carefully can
- // avoid extra casts in the SCEVExpander. The LHS is more likely to be
- // a pointer type than the RHS, so use the RHS' type here.
- return RHS->getType();
-}
-
-bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
- // Add recurrences are never invariant in the function-body (null loop).
- if (!QueryLoop)
- return false;
-
- // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
- if (QueryLoop->contains(L))
- return false;
-
- // This recurrence is invariant w.r.t. QueryLoop if L contains QueryLoop.
- if (L->contains(QueryLoop))
- return true;
-
- // This recurrence is variant w.r.t. QueryLoop if any of its operands
- // are variant.
- for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
- if (!getOperand(i)->isLoopInvariant(QueryLoop))
- return false;
-
- // Otherwise it's loop-invariant.
- return true;
-}
-
-bool
-SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
- return DT->dominates(L->getHeader(), BB) &&
- SCEVNAryExpr::dominates(BB, DT);
-}
-
-bool
-SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- // This uses a "dominates" query instead of "properly dominates" query because
- // the instruction which produces the addrec's value is a PHI, and a PHI
- // effectively properly dominates its entire containing block.
- return DT->dominates(L->getHeader(), BB) &&
- SCEVNAryExpr::properlyDominates(BB, DT);
-}
-
-void SCEVAddRecExpr::print(raw_ostream &OS) const {
- OS << "{" << *Operands[0];
- for (unsigned i = 1, e = NumOperands; i != e; ++i)
- OS << ",+," << *Operands[i];
- OS << "}<";
- WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
- OS << ">";
-}
-
void SCEVUnknown::deleted() {
- // Clear this SCEVUnknown from ValuesAtScopes.
- SE->ValuesAtScopes.erase(this);
+ // Clear this SCEVUnknown from various maps.
+ SE->forgetMemoizedResults(this);
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
}
void SCEVUnknown::allUsesReplacedWith(Value *New) {
- // Clear this SCEVUnknown from ValuesAtScopes.
- SE->ValuesAtScopes.erase(this);
+ // Clear this SCEVUnknown from various maps.
+ SE->forgetMemoizedResults(this);
// Remove this SCEVUnknown from the uniquing map.
SE->UniqueSCEVs.RemoveNode(this);
setValPtr(New);
}
-bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
- // All non-instruction values are loop invariant. All instructions are loop
- // invariant if they are not contained in the specified loop.
- // Instructions are never considered invariant in the function body
- // (null loop) because they are defined within the "loop".
- if (Instruction *I = dyn_cast<Instruction>(getValue()))
- return L && !L->contains(I);
- return true;
-}
-
-bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
- if (Instruction *I = dyn_cast<Instruction>(getValue()))
- return DT->dominates(I->getParent(), BB);
- return true;
-}
-
-bool SCEVUnknown::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
- if (Instruction *I = dyn_cast<Instruction>(getValue()))
- return DT->properlyDominates(I->getParent(), BB);
- return true;
-}
-
-const Type *SCEVUnknown::getType() const {
- return getValue()->getType();
-}
-
bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(getValue()))
if (VCE->getOpcode() == Instruction::PtrToInt)
return false;
}
-void SCEVUnknown::print(raw_ostream &OS) const {
- const Type *AllocTy;
- if (isSizeOf(AllocTy)) {
- OS << "sizeof(" << *AllocTy << ")";
- return;
- }
- if (isAlignOf(AllocTy)) {
- OS << "alignof(" << *AllocTy << ")";
- return;
- }
-
- const Type *CTy;
- Constant *FieldNo;
- if (isOffsetOf(CTy, FieldNo)) {
- OS << "offsetof(" << *CTy << ", ";
- WriteAsOperand(OS, FieldNo, false);
- OS << ")";
- return;
- }
-
- // Otherwise just print it normally.
- WriteAsOperand(OS, getValue(), false);
-}
-
//===----------------------------------------------------------------------===//
// SCEV Utilities
//===----------------------------------------------------------------------===//
public:
explicit SCEVComplexityCompare(const LoopInfo *li) : LI(li) {}
+    // Return true if LHS is less than RHS; return false if LHS is at least RHS.
bool operator()(const SCEV *LHS, const SCEV *RHS) const {
+ return compare(LHS, RHS) < 0;
+ }
+
+ // Return negative, zero, or positive, if LHS is less than, equal to, or
+ // greater than RHS, respectively. A three-way result allows recursive
+ // comparisons to be more efficient.
+ int compare(const SCEV *LHS, const SCEV *RHS) const {
// Fast-path: SCEVs are uniqued so we can do a quick equality check.
if (LHS == RHS)
- return false;
+ return 0;
// Primarily, sort the SCEVs by their getSCEVType().
unsigned LType = LHS->getSCEVType(), RType = RHS->getSCEVType();
if (LType != RType)
- return LType < RType;
+ return (int)LType - (int)RType;
// Aside from the getSCEVType() ordering, the particular ordering
// isn't very important except that it's beneficial to be consistent,
// so that (a + b) and (b + a) don't end up as different expressions.
-
- // Sort SCEVUnknown values with some loose heuristics. TODO: This is
- // not as complete as it could be.
- if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
+ switch (LType) {
+ case scUnknown: {
+ const SCEVUnknown *LU = cast<SCEVUnknown>(LHS);
const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);
+
+ // Sort SCEVUnknown values with some loose heuristics. TODO: This is
+ // not as complete as it could be.
const Value *LV = LU->getValue(), *RV = RU->getValue();
// Order pointer values after integer values. This helps SCEVExpander
bool LIsPointer = LV->getType()->isPointerTy(),
RIsPointer = RV->getType()->isPointerTy();
if (LIsPointer != RIsPointer)
- return RIsPointer;
+ return (int)LIsPointer - (int)RIsPointer;
// Compare getValueID values.
unsigned LID = LV->getValueID(),
RID = RV->getValueID();
if (LID != RID)
- return LID < RID;
+ return (int)LID - (int)RID;
// Sort arguments by their position.
if (const Argument *LA = dyn_cast<Argument>(LV)) {
const Argument *RA = cast<Argument>(RV);
- return LA->getArgNo() < RA->getArgNo();
+ unsigned LArgNo = LA->getArgNo(), RArgNo = RA->getArgNo();
+ return (int)LArgNo - (int)RArgNo;
}
- // For instructions, compare their loop depth, and their opcode.
- // This is pretty loose.
+ // For instructions, compare their loop depth, and their operand
+ // count. This is pretty loose.
if (const Instruction *LInst = dyn_cast<Instruction>(LV)) {
const Instruction *RInst = cast<Instruction>(RV);
unsigned LDepth = LI->getLoopDepth(LParent),
RDepth = LI->getLoopDepth(RParent);
if (LDepth != RDepth)
- return LDepth < RDepth;
+ return (int)LDepth - (int)RDepth;
}
// Compare the number of operands.
unsigned LNumOps = LInst->getNumOperands(),
RNumOps = RInst->getNumOperands();
- if (LNumOps != RNumOps)
- return LNumOps < RNumOps;
+ return (int)LNumOps - (int)RNumOps;
}
- return false;
+ return 0;
}
- // Compare constant values.
- if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
+ case scConstant: {
+ const SCEVConstant *LC = cast<SCEVConstant>(LHS);
const SCEVConstant *RC = cast<SCEVConstant>(RHS);
+
+ // Compare constant values.
const APInt &LA = LC->getValue()->getValue();
const APInt &RA = RC->getValue()->getValue();
unsigned LBitWidth = LA.getBitWidth(), RBitWidth = RA.getBitWidth();
if (LBitWidth != RBitWidth)
- return LBitWidth < RBitWidth;
- return LA.ult(RA);
+ return (int)LBitWidth - (int)RBitWidth;
+ return LA.ult(RA) ? -1 : 1;
}
- // Compare addrec loop depths.
- if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
+ case scAddRecExpr: {
+ const SCEVAddRecExpr *LA = cast<SCEVAddRecExpr>(LHS);
const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
+
+ // Compare addrec loop depths.
const Loop *LLoop = LA->getLoop(), *RLoop = RA->getLoop();
if (LLoop != RLoop) {
unsigned LDepth = LLoop->getLoopDepth(),
RDepth = RLoop->getLoopDepth();
if (LDepth != RDepth)
- return LDepth < RDepth;
+ return (int)LDepth - (int)RDepth;
+ }
+
+ // Addrec complexity grows with operand count.
+ unsigned LNumOps = LA->getNumOperands(), RNumOps = RA->getNumOperands();
+ if (LNumOps != RNumOps)
+ return (int)LNumOps - (int)RNumOps;
+
+ // Lexicographically compare.
+ for (unsigned i = 0; i != LNumOps; ++i) {
+ long X = compare(LA->getOperand(i), RA->getOperand(i));
+ if (X != 0)
+ return X;
}
+
+ return 0;
}
- // Lexicographically compare n-ary expressions.
- if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
+ case scAddExpr:
+ case scMulExpr:
+ case scSMaxExpr:
+ case scUMaxExpr: {
+ const SCEVNAryExpr *LC = cast<SCEVNAryExpr>(LHS);
const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
+
+ // Lexicographically compare n-ary expressions.
unsigned LNumOps = LC->getNumOperands(), RNumOps = RC->getNumOperands();
for (unsigned i = 0; i != LNumOps; ++i) {
if (i >= RNumOps)
- return false;
- const SCEV *LOp = LC->getOperand(i), *ROp = RC->getOperand(i);
- if (operator()(LOp, ROp))
- return true;
- if (operator()(ROp, LOp))
- return false;
+ return 1;
+ long X = compare(LC->getOperand(i), RC->getOperand(i));
+ if (X != 0)
+ return X;
}
- return LNumOps < RNumOps;
+ return (int)LNumOps - (int)RNumOps;
}
- // Lexicographically compare udiv expressions.
- if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
+ case scUDivExpr: {
+ const SCEVUDivExpr *LC = cast<SCEVUDivExpr>(LHS);
const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
- const SCEV *LL = LC->getLHS(), *LR = LC->getRHS(),
- *RL = RC->getLHS(), *RR = RC->getRHS();
- if (operator()(LL, RL))
- return true;
- if (operator()(RL, LL))
- return false;
- if (operator()(LR, RR))
- return true;
- if (operator()(RR, LR))
- return false;
- return false;
+
+ // Lexicographically compare udiv expressions.
+ long X = compare(LC->getLHS(), RC->getLHS());
+ if (X != 0)
+ return X;
+ return compare(LC->getRHS(), RC->getRHS());
}
- // Compare cast expressions by operand.
- if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend: {
+ const SCEVCastExpr *LC = cast<SCEVCastExpr>(LHS);
const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
- return operator()(LC->getOperand(), RC->getOperand());
+
+ // Compare cast expressions by operand.
+ return compare(LC->getOperand(), RC->getOperand());
+ }
+
+ default:
+ break;
}
llvm_unreachable("Unknown SCEV kind!");
- return false;
+ return 0;
}
};
}
if (Ops.size() == 2) {
// This is the common case, which also happens to be trivially simple.
// Special case it.
- if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
- std::swap(Ops[0], Ops[1]);
+ const SCEV *&LHS = Ops[0], *&RHS = Ops[1];
+ if (SCEVComplexityCompare(LI)(RHS, LHS))
+ std::swap(LHS, RHS);
return;
}
if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
return getTruncateOrZeroExtend(SZ->getOperand(), Ty);
+ // trunc(x1+x2+...+xN) --> trunc(x1)+trunc(x2)+...+trunc(xN) if we can
+ // eliminate all the truncates.
+ if (const SCEVAddExpr *SA = dyn_cast<SCEVAddExpr>(Op)) {
+ SmallVector<const SCEV *, 4> Operands;
+ bool hasTrunc = false;
+ for (unsigned i = 0, e = SA->getNumOperands(); i != e && !hasTrunc; ++i) {
+ const SCEV *S = getTruncateExpr(SA->getOperand(i), Ty);
+ hasTrunc = isa<SCEVTruncateExpr>(S);
+ Operands.push_back(S);
+ }
+ if (!hasTrunc)
+ return getAddExpr(Operands);
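+    // Note: the recursive getTruncateExpr/getAddExpr calls above may have
+    // created new SCEVs and invalidated the insertion point IP computed
+    // earlier; re-running the lookup here refreshes IP before the node is
+    // finally inserted below.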
+ UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
+ }
+
+ // trunc(x1*x2*...*xN) --> trunc(x1)*trunc(x2)*...*trunc(xN) if we can
+ // eliminate all the truncates.
+ if (const SCEVMulExpr *SM = dyn_cast<SCEVMulExpr>(Op)) {
+ SmallVector<const SCEV *, 4> Operands;
+ bool hasTrunc = false;
+ for (unsigned i = 0, e = SM->getNumOperands(); i != e && !hasTrunc; ++i) {
+ const SCEV *S = getTruncateExpr(SM->getOperand(i), Ty);
+ hasTrunc = isa<SCEVTruncateExpr>(S);
+ Operands.push_back(S);
+ }
+ if (!hasTrunc)
+ return getMulExpr(Operands);
+ UniqueSCEVs.FindNodeOrInsertPos(ID, IP); // Mutates IP, returns NULL.
+ }
+
// If the input value is a chrec scev, truncate the chrec's operands.
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
SmallVector<const SCEV *, 4> Operands;
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
- return getAddRecExpr(Operands, AddRec->getLoop());
+ return getAddRecExpr(Operands, AddRec->getLoop(), SCEV::FlagAnyWrap);
}
// As a special case, fold trunc(undef) to undef. We don't want to
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ // zext(trunc(x)) --> zext(x) or x or trunc(x)
+ if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
+ // It's possible the bits taken off by the truncate were all zero bits. If
+ // so, we should be able to simplify this further.
+ const SCEV *X = ST->getOperand();
+ ConstantRange CR = getUnsignedRange(X);
+ unsigned TruncBits = getTypeSizeInBits(ST->getType());
+ unsigned NewBits = getTypeSizeInBits(Ty);
+ if (CR.truncate(TruncBits).zeroExtend(NewBits).contains(
+ CR.zextOrTrunc(NewBits)))
+ return getTruncateOrZeroExtend(X, Ty);
+ }
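+  // As an illustration (hypothetical value %x): if %x is an i32 whose unsigned
+  // range is known to be [0, 200), the bits removed by 'trunc %x to i8' are
+  // all zero, so the zext of that trunc back to i32 simplifies to %x itself.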
+
// If the input value is a chrec scev, and we can prove that the value
// did not overflow the old, smaller, value, we can zero extend all of the
// operands (often constants). This allows analysis of something like
// If we have special knowledge that this addrec won't overflow,
// we don't need to do any further analysis.
- if (AR->hasNoUnsignedWrap())
+ if (AR->getNoWrapFlags(SCEV::FlagNUW))
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
getZeroExtendExpr(Step, Ty),
- L);
+ // FIXME: Can use SCEV::FlagNUW
+ L, SCEV::FlagAnyWrap);
// Check whether the backedge-taken count is SCEVCouldNotCompute.
// Note that this serves two purposes: It filters out loops that are
// Return the expression with the addrec on the outside.
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
getZeroExtendExpr(Step, Ty),
- L);
+ // FIXME: can use FlagNUW
+ L, SCEV::FlagAnyWrap);
// Similar to above, only this time treat the step value as signed.
// This covers loops that count down.
// Return the expression with the addrec on the outside.
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
- L);
+ // FIXME: can use FlagNW
+ L, SCEV::FlagAnyWrap);
}
// If the backedge is guarded by a comparison with the pre-inc value
// Return the expression with the addrec on the outside.
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
getZeroExtendExpr(Step, Ty),
- L);
+ // FIXME: can use FlagNUW
+ L, SCEV::FlagAnyWrap);
} else if (isKnownNegative(Step)) {
const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
(isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
AR->getPostIncExpr(*this), N)))
- // Return the expression with the addrec on the outside.
+ // Return the expression with the addrec on the outside. The
+ // negative step causes unsigned wrap, but it still can't self-wrap.
return getAddRecExpr(getZeroExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
- L);
+ // FIXME: can use FlagNW
+ L, SCEV::FlagAnyWrap);
}
}
}
if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
return getSignExtendExpr(SS->getOperand(), Ty);
+ // sext(zext(x)) --> zext(x)
+ if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
+ return getZeroExtendExpr(SZ->getOperand(), Ty);
+
// Before doing any expensive analysis, check to see if we've already
// computed a SCEV for this Op and Ty.
FoldingSetNodeID ID;
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
+ // If the input value is provably positive, build a zext instead.
+ if (isKnownNonNegative(Op))
+ return getZeroExtendExpr(Op, Ty);
+
+ // sext(trunc(x)) --> sext(x) or x or trunc(x)
+ if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op)) {
+ // It's possible the bits taken off by the truncate were all sign bits. If
+ // so, we should be able to simplify this further.
+ const SCEV *X = ST->getOperand();
+ ConstantRange CR = getSignedRange(X);
+ unsigned TruncBits = getTypeSizeInBits(ST->getType());
+ unsigned NewBits = getTypeSizeInBits(Ty);
+ if (CR.truncate(TruncBits).signExtend(NewBits).contains(
+ CR.sextOrTrunc(NewBits)))
+ return getTruncateOrSignExtend(X, Ty);
+ }
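+  // As an illustration (hypothetical value %x): if %x is an i32 whose signed
+  // range is known to be [-50, 50), the bits removed by 'trunc %x to i8' are
+  // all sign bits, so the sext of that trunc back to i32 simplifies to %x.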
+
// If the input value is a chrec scev, and we can prove that the value
// did not overflow the old, smaller, value, we can sign extend all of the
// operands (often constants). This allows analysis of something like
// If we have special knowledge that this addrec won't overflow,
// we don't need to do any further analysis.
- if (AR->hasNoSignedWrap())
+ if (AR->getNoWrapFlags(SCEV::FlagNSW))
return getAddRecExpr(getSignExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
- L);
+ // FIXME: can use SCEV::FlagNSW
+ L, SCEV::FlagAnyWrap);
// Check whether the backedge-taken count is SCEVCouldNotCompute.
// Note that this serves two purposes: It filters out loops that are
// Return the expression with the addrec on the outside.
return getAddRecExpr(getSignExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
- L);
+ // FIXME: can use SCEV::FlagNSW
+ L, SCEV::FlagAnyWrap);
// Similar to above, only this time treat the step value as unsigned.
// This covers loops that count up with an unsigned step.
// Return the expression with the addrec on the outside.
return getAddRecExpr(getSignExtendExpr(Start, Ty),
getZeroExtendExpr(Step, Ty),
- L);
+ // FIXME: can use SCEV::FlagNSW
+ L, SCEV::FlagAnyWrap);
}
// If the backedge is guarded by a comparison with the pre-inc value
// Return the expression with the addrec on the outside.
return getAddRecExpr(getSignExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
- L);
+ // FIXME: can use SCEV::FlagNSW
+ L, SCEV::FlagAnyWrap);
} else if (isKnownNegative(Step)) {
const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
// Return the expression with the addrec on the outside.
return getAddRecExpr(getSignExtendExpr(Start, Ty),
getSignExtendExpr(Step, Ty),
- L);
+ // FIXME: can use SCEV::FlagNSW
+ L, SCEV::FlagAnyWrap);
}
}
}
for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
I != E; ++I)
Ops.push_back(getAnyExtendExpr(*I, Ty));
- return getAddRecExpr(Ops, AR->getLoop());
+ // FIXME: can use AR->getNoWrapFlags(SCEV::FlagNW)
+ return getAddRecExpr(Ops, AR->getLoop(), SCEV::FlagAnyWrap);
}
// As a special case, fold anyext(undef) to undef. We don't want to
/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops,
- bool HasNUW, bool HasNSW) {
+ SCEV::NoWrapFlags Flags) {
+ assert(!(Flags & ~(SCEV::FlagNUW | SCEV::FlagNSW)) &&
+ "only nuw or nsw allowed");
assert(!Ops.empty() && "Cannot get empty add!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
"SCEVAddExpr operand types don't match!");
#endif
- // If HasNSW is true and all the operands are non-negative, infer HasNUW.
- if (!HasNUW && HasNSW) {
+ // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
+ if (!(Flags & SCEV::FlagNUW) && (Flags & SCEV::FlagNSW)) {
bool All = true;
for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
E = Ops.end(); I != E; ++I)
All = false;
break;
}
- if (All) HasNUW = true;
+ if (All) Flags = setFlags(Flags, SCEV::FlagNUW);
}
// Sort by complexity, this groups all similar expression types together.
if (Ops.size() == 1) return Ops[0];
}
- // Okay, check to see if the same value occurs in the operand list twice. If
- // so, merge them together into an multiply expression. Since we sorted the
- // list, these values are required to be adjacent.
+ // Okay, check to see if the same value occurs in the operand list more than
+  // once. If so, merge them together into a multiply expression. Since we
+ // sorted the list, these values are required to be adjacent.
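+  // For example, X + Y + Y + Y now folds directly to X + Y*3.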
const Type *Ty = Ops[0]->getType();
bool FoundMatch = false;
- for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
+ for (unsigned i = 0, e = Ops.size(); i != e-1; ++i)
if (Ops[i] == Ops[i+1]) { // X + Y + Y --> X + Y*2
- // Found a match, merge the two values into a multiply, and add any
- // remaining values to the result.
- const SCEV *Two = getConstant(Ty, 2);
- const SCEV *Mul = getMulExpr(Two, Ops[i]);
- if (Ops.size() == 2)
+ // Scan ahead to count how many equal operands there are.
+ unsigned Count = 2;
+ while (i+Count != e && Ops[i+Count] == Ops[i])
+ ++Count;
+ // Merge the values into a multiply.
+ const SCEV *Scale = getConstant(Ty, Count);
+ const SCEV *Mul = getMulExpr(Scale, Ops[i]);
+ if (Ops.size() == Count)
return Mul;
Ops[i] = Mul;
- Ops.erase(Ops.begin()+i+1);
- --i; --e;
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+Count);
+ --i; e -= Count - 1;
FoundMatch = true;
}
if (FoundMatch)
- return getAddExpr(Ops, HasNUW, HasNSW);
+ return getAddExpr(Ops, Flags);
// Check for truncates. If all the operands are truncated from the same
// type, see if factoring out the truncate would permit the result to be
}
if (Ok) {
// Evaluate the expression in the larger type.
- const SCEV *Fold = getAddExpr(LargeOps, HasNUW, HasNSW);
+ const SCEV *Fold = getAddExpr(LargeOps, Flags);
// If it folds to something simple, use it. Otherwise, don't.
if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
return getTruncateExpr(Fold, DstType);
}
// Check this multiply against other multiplies being added together.
- bool AnyFold = false;
for (unsigned OtherMulIdx = Idx+1;
OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
++OtherMulIdx) {
const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
if (Ops.size() == 2) return OuterMul;
- Ops[Idx] = OuterMul;
- Ops.erase(Ops.begin()+OtherMulIdx);
- OtherMulIdx = Idx;
- AnyFold = true;
+ Ops.erase(Ops.begin()+Idx);
+ Ops.erase(Ops.begin()+OtherMulIdx-1);
+ Ops.push_back(OuterMul);
+ return getAddExpr(Ops);
}
}
- if (AnyFold)
- return getAddExpr(Ops);
}
}
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRecLoop)) {
+ if (isLoopInvariant(Ops[i], AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
// Build the new addrec. Propagate the NUW and NSW flags if both the
// outer add and the inner addrec are guaranteed to have no overflow.
- const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop,
- HasNUW && AddRec->hasNoUnsignedWrap(),
- HasNSW && AddRec->hasNoSignedWrap());
+ // FIXME: Always propagate NW
+ // AddRec->getNoWrapFlags(setFlags(Flags, SCEV::FlagNW))
+ Flags = AddRec->getNoWrapFlags(Flags);
+ const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop, Flags);
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
// there are multiple AddRec's with the same loop induction variable being
// added together. If so, we can fold them.
for (unsigned OtherIdx = Idx+1;
- OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
- if (OtherIdx != Idx) {
- const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
- if (AddRecLoop == OtherAddRec->getLoop()) {
- // Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D}
- SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
- AddRec->op_end());
- for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
- if (i >= NewOps.size()) {
- NewOps.append(OtherAddRec->op_begin()+i,
- OtherAddRec->op_end());
- break;
+ OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
+ // Other + {A,+,B}<L> + {C,+,D}<L> --> Other + {A+C,+,B+D}<L>
+ SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
+ AddRec->op_end());
+ for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (const SCEVAddRecExpr *OtherAddRec =
+ dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
+ if (OtherAddRec->getLoop() == AddRecLoop) {
+ for (unsigned i = 0, e = OtherAddRec->getNumOperands();
+ i != e; ++i) {
+ if (i >= AddRecOps.size()) {
+ AddRecOps.append(OtherAddRec->op_begin()+i,
+ OtherAddRec->op_end());
+ break;
+ }
+ AddRecOps[i] = getAddExpr(AddRecOps[i],
+ OtherAddRec->getOperand(i));
+ }
+ Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
}
- NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
- }
- const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop);
-
- if (Ops.size() == 2) return NewAddRec;
-
- Ops.erase(Ops.begin()+Idx);
- Ops.erase(Ops.begin()+OtherIdx-1);
- Ops.push_back(NewAddRec);
- return getAddExpr(Ops);
- }
+ // Step size has changed, so we cannot guarantee no self-wraparound.
+ Ops[Idx] = getAddRecExpr(AddRecOps, AddRecLoop, SCEV::FlagAnyWrap);
+ return getAddExpr(Ops);
}
// Otherwise couldn't fold anything into this recurrence. Move onto the
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scAddExpr);
- ID.AddInteger(Ops.size());
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
}
- if (HasNUW) S->setHasNoUnsignedWrap(true);
- if (HasNSW) S->setHasNoSignedWrap(true);
+ S->setNoWrapFlags(Flags);
return S;
}
/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
- bool HasNUW, bool HasNSW) {
+ SCEV::NoWrapFlags Flags) {
+ assert(Flags == maskFlags(Flags, SCEV::FlagNUW | SCEV::FlagNSW) &&
+ "only nuw or nsw allowed");
assert(!Ops.empty() && "Cannot get empty mul!");
if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
"SCEVMulExpr operand types don't match!");
#endif
- // If HasNSW is true and all the operands are non-negative, infer HasNUW.
- if (!HasNUW && HasNSW) {
+ // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
+ if (!(Flags & SCEV::FlagNUW) && (Flags & SCEV::FlagNSW)) {
bool All = true;
for (SmallVectorImpl<const SCEV *>::const_iterator I = Ops.begin(),
E = Ops.end(); I != E; ++I)
All = false;
break;
}
- if (All) HasNUW = true;
+ if (All) Flags = setFlags(Flags, SCEV::FlagNUW);
}
// Sort by complexity, this groups all similar expression types together.
} else if (Ops[0]->isAllOnesValue()) {
// If we have a mul by -1 of an add, try distributing the -1 among the
// add operands.
- if (Ops.size() == 2)
+ if (Ops.size() == 2) {
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
SmallVector<const SCEV *, 4> NewOps;
bool AnyFolded = false;
- for (SCEVAddRecExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
- I != E; ++I) {
+ for (SCEVAddRecExpr::op_iterator I = Add->op_begin(),
+ E = Add->op_end(); I != E; ++I) {
const SCEV *Mul = getMulExpr(Ops[0], *I);
if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
NewOps.push_back(Mul);
if (AnyFolded)
return getAddExpr(NewOps);
}
+ else if (const SCEVAddRecExpr *
+ AddRec = dyn_cast<SCEVAddRecExpr>(Ops[1])) {
+ // Negation preserves a recurrence's no self-wrap property.
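+          // For example, (-1) * {a,+,b}<nw> becomes {-a,+,-b}<nw>.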
+ SmallVector<const SCEV *, 4> Operands;
+ for (SCEVAddRecExpr::op_iterator I = AddRec->op_begin(),
+ E = AddRec->op_end(); I != E; ++I) {
+ Operands.push_back(getMulExpr(Ops[0], *I));
+ }
+ return getAddRecExpr(Operands, AddRec->getLoop(),
+ AddRec->getNoWrapFlags(SCEV::FlagNW));
+ }
+ }
}
if (Ops.size() == 1)
// they are loop invariant w.r.t. the recurrence.
SmallVector<const SCEV *, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+ const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
+ if (isLoopInvariant(Ops[i], AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
// Build the new addrec. Propagate the NUW and NSW flags if both the
// outer mul and the inner addrec are guaranteed to have no overflow.
- const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
- HasNUW && AddRec->hasNoUnsignedWrap(),
- HasNSW && AddRec->hasNoSignedWrap());
+ //
+      // The no-self-wrap property cannot be guaranteed after changing the
+      // step size, but it will be inferred if either NUW or NSW is true.
+ Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW));
+ const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags);
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
// there are multiple AddRec's with the same loop induction variable being
// multiplied together. If so, we can fold them.
for (unsigned OtherIdx = Idx+1;
- OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
- if (OtherIdx != Idx) {
- const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
- if (AddRec->getLoop() == OtherAddRec->getLoop()) {
- // F * G --> {A,+,B} * {C,+,D} --> {A*C,+,F*D + G*B + B*D}
- const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
- const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
- const SCEV *B = F->getStepRecurrence(*this);
- const SCEV *D = G->getStepRecurrence(*this);
- const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
- getMulExpr(G, B),
- getMulExpr(B, D));
- const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
- F->getLoop());
- if (Ops.size() == 2) return NewAddRec;
-
- Ops.erase(Ops.begin()+Idx);
- Ops.erase(Ops.begin()+OtherIdx-1);
- Ops.push_back(NewAddRec);
- return getMulExpr(Ops);
- }
+ OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (AddRecLoop == cast<SCEVAddRecExpr>(Ops[OtherIdx])->getLoop()) {
+ // F * G, where F = {A,+,B}<L> and G = {C,+,D}<L> -->
+ // {A*C,+,F*D + G*B + B*D}<L>
+ for (; OtherIdx != Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);
+ ++OtherIdx)
+ if (const SCEVAddRecExpr *OtherAddRec =
+ dyn_cast<SCEVAddRecExpr>(Ops[OtherIdx]))
+ if (OtherAddRec->getLoop() == AddRecLoop) {
+ const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
+ const SCEV *NewStart = getMulExpr(F->getStart(), G->getStart());
+ const SCEV *B = F->getStepRecurrence(*this);
+ const SCEV *D = G->getStepRecurrence(*this);
+ const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
+ getMulExpr(G, B),
+ getMulExpr(B, D));
+ const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
+ F->getLoop(),
+ SCEV::FlagAnyWrap);
+ if (Ops.size() == 2) return NewAddRec;
+ Ops[Idx] = AddRec = cast<SCEVAddRecExpr>(NewAddRec);
+ Ops.erase(Ops.begin() + OtherIdx); --OtherIdx;
+ }
+ return getMulExpr(Ops);
}
// Otherwise couldn't fold anything into this recurrence. Move onto the
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scMulExpr);
- ID.AddInteger(Ops.size());
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
}
- if (HasNUW) S->setHasNoUnsignedWrap(true);
- if (HasNSW) S->setHasNoSignedWrap(true);
+ S->setNoWrapFlags(Flags);
return S;
}
getZeroExtendExpr(AR, ExtTy) ==
getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
getZeroExtendExpr(Step, ExtTy),
- AR->getLoop())) {
+ AR->getLoop(), SCEV::FlagAnyWrap)) {
SmallVector<const SCEV *, 4> Operands;
for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
- return getAddRecExpr(Operands, AR->getLoop());
+ return getAddRecExpr(Operands, AR->getLoop(),
+ // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
+ SCEV::FlagAnyWrap);
}
// (A*B)/C --> A*(B/C) if safe and B/C can be folded.
if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
-const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
- const SCEV *Step, const Loop *L,
- bool HasNUW, bool HasNSW) {
+const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start, const SCEV *Step,
+ const Loop *L,
+ SCEV::NoWrapFlags Flags) {
SmallVector<const SCEV *, 4> Operands;
Operands.push_back(Start);
if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
if (StepChrec->getLoop() == L) {
Operands.append(StepChrec->op_begin(), StepChrec->op_end());
- return getAddRecExpr(Operands, L);
+ // FIXME: can use maskFlags(Flags, SCEV::FlagNW)
+ return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap);
}
Operands.push_back(Step);
- return getAddRecExpr(Operands, L, HasNUW, HasNSW);
+ return getAddRecExpr(Operands, L, Flags);
}
/// getAddRecExpr - Get an add recurrence expression for the specified loop.
/// Simplify the expression as much as possible.
const SCEV *
ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
- const Loop *L,
- bool HasNUW, bool HasNSW) {
+ const Loop *L, SCEV::NoWrapFlags Flags) {
if (Operands.size() == 1) return Operands[0];
#ifndef NDEBUG
const Type *ETy = getEffectiveSCEVType(Operands[0]->getType());
for (unsigned i = 1, e = Operands.size(); i != e; ++i)
assert(getEffectiveSCEVType(Operands[i]->getType()) == ETy &&
"SCEVAddRecExpr operand types don't match!");
+ for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+ assert(isLoopInvariant(Operands[i], L) &&
+ "SCEVAddRecExpr operand is not loop-invariant!");
#endif
if (Operands.back()->isZero()) {
Operands.pop_back();
- return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0} --> X
+ return getAddRecExpr(Operands, L, SCEV::FlagAnyWrap); // {X,+,0} --> X
}
// It's tempting to want to call getMaxBackedgeTakenCount count here and
// meaningful BE count at this point (and if we don't, we'd be stuck
// with a SCEVCouldNotCompute as the cached BE count).
- // If HasNSW is true and all the operands are non-negative, infer HasNUW.
- if (!HasNUW && HasNSW) {
+ // If FlagNSW is true and all the operands are non-negative, infer FlagNUW.
+ if (!(Flags & SCEV::FlagNUW) && (Flags & SCEV::FlagNSW)) {
bool All = true;
for (SmallVectorImpl<const SCEV *>::const_iterator I = Operands.begin(),
E = Operands.end(); I != E; ++I)
All = false;
break;
}
- if (All) HasNUW = true;
+ if (All) Flags = setFlags(Flags, SCEV::FlagNUW);
}
// Canonicalize nested AddRecs in by nesting them in order of loop depth.
// requirement.
bool AllInvariant = true;
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
- if (!Operands[i]->isLoopInvariant(L)) {
+ if (!isLoopInvariant(Operands[i], L)) {
AllInvariant = false;
break;
}
if (AllInvariant) {
- NestedOperands[0] = getAddRecExpr(Operands, L);
+ // Create a recurrence for the outer loop with the same step size.
+ //
+ // FIXME:
+ // The outer recurrence keeps its NW flag but only keeps NUW/NSW if the
+ // inner recurrence has the same property.
+ // maskFlags(Flags, SCEV::FlagNW | NestedAR->getNoWrapFlags());
+ SCEV::NoWrapFlags OuterFlags = SCEV::FlagAnyWrap;
+
+ NestedOperands[0] = getAddRecExpr(Operands, L, OuterFlags);
AllInvariant = true;
for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
- if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
+ if (!isLoopInvariant(NestedOperands[i], NestedLoop)) {
AllInvariant = false;
break;
}
- if (AllInvariant)
+ if (AllInvariant) {
// Ok, both add recurrences are valid after the transformation.
- return getAddRecExpr(NestedOperands, NestedLoop, HasNUW, HasNSW);
+ //
+ // FIXME:
+ // The inner recurrence keeps its NW flag but only keeps NUW/NSW if
+ // the outer recurrence has the same property.
+ // maskFlags(NestedAR->getNoWrapFlags(), SCEV::FlagNW | Flags);
+ SCEV::NoWrapFlags InnerFlags = SCEV::FlagAnyWrap;
+ return getAddRecExpr(NestedOperands, NestedLoop, InnerFlags);
+ }
}
// Reset Operands to its original state.
Operands[0] = NestedAR;
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scAddRecExpr);
- ID.AddInteger(Operands.size());
for (unsigned i = 0, e = Operands.size(); i != e; ++i)
ID.AddPointer(Operands[i]);
ID.AddPointer(L);
O, Operands.size(), L);
UniqueSCEVs.InsertNode(S, IP);
}
- if (HasNUW) S->setHasNoUnsignedWrap(true);
- if (HasNSW) S->setHasNoSignedWrap(true);
+ S->setNoWrapFlags(Flags);
return S;
}
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scSMaxExpr);
- ID.AddInteger(Ops.size());
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
// already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scUMaxExpr);
- ID.AddInteger(Ops.size());
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
const SCEV *ScalarEvolution::getSCEV(Value *V) {
assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
- std::map<SCEVCallbackVH, const SCEV *>::const_iterator I = Scalars.find(V);
- if (I != Scalars.end()) return I->second;
+ ValueExprMapType::const_iterator I = ValueExprMap.find(V);
+ if (I != ValueExprMap.end()) return I->second;
const SCEV *S = createSCEV(V);
// The process of creating a SCEV for V may have caused other SCEVs
// to have been created, so it's necessary to insert the new entry
// from scratch, rather than trying to remember the insert position
// above.
- Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
+ ValueExprMap.insert(std::make_pair(SCEVCallbackVH(V, this), S));
return S;
}
return getMinusSCEV(AllOnes, V);
}
-/// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
+/// getMinusSCEV - Return LHS-RHS. Minus is represented in SCEV as A+B*-1.
///
-const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
- const SCEV *RHS) {
+/// FIXME: prohibit FlagNUW here, as soon as getMinusSCEVForExitTest goes.
+const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS, const SCEV *RHS,
+ SCEV::NoWrapFlags Flags) {
// Fast path: X - X --> 0.
if (LHS == RHS)
return getConstant(LHS->getType(), 0);
// X - Y --> X + -Y
- return getAddExpr(LHS, getNegativeSCEV(RHS));
+ return getAddExpr(LHS, getNegativeSCEV(RHS), Flags);
}
/// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
/// input value to the specified type. If the type must be extended, it is zero
/// extended.
const SCEV *
-ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
- const Type *Ty) {
+ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
(Ty->isIntegerTy() || Ty->isPointerTy()) &&
/// ForgetSymbolicValue - This looks up computed SCEV values for all
/// instructions that depend on the given instruction and removes them from
-/// the Scalars map if they reference SymName. This is used during PHI
+/// the ValueExprMap if they reference SymName. This is used during PHI
/// resolution.
void
ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
+ ValueExprMapType::iterator It =
+ ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
+ const SCEV *Old = It->second;
+
// Short-circuit the def-use traversal if the symbolic name
// ceases to appear in expressions.
- if (It->second != SymName && !It->second->hasOperand(SymName))
+ if (Old != SymName && !hasOperand(Old, SymName))
continue;
// SCEVUnknown for a PHI either means that it has an unrecognized
// updates on its own when it gets to that point. In the third, we do
// want to forget the SCEVUnknown.
if (!isa<PHINode>(I) ||
- !isa<SCEVUnknown>(It->second) ||
- (I != PN && It->second == SymName)) {
- ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ !isa<SCEVUnknown>(Old) ||
+ (I != PN && Old == SymName)) {
+ forgetMemoizedResults(Old);
+ ValueExprMap.erase(It);
}
}
if (BEValueV && StartValueV) {
// While we are analyzing this PHI node, handle its value symbolically.
const SCEV *SymbolicName = getUnknown(PN);
- assert(Scalars.find(PN) == Scalars.end() &&
+ assert(ValueExprMap.find(PN) == ValueExprMap.end() &&
"PHI node already processed?");
- Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
+ ValueExprMap.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
// Using this symbolic name for the PHI, analyze the value coming around
// the back-edge.
// This is not a valid addrec if the step amount is varying each
// loop iteration, but is not itself an addrec in this loop.
- if (Accum->isLoopInvariant(L) ||
+ if (isLoopInvariant(Accum, L) ||
(isa<SCEVAddRecExpr>(Accum) &&
cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
- bool HasNUW = false;
- bool HasNSW = false;
+ SCEV::NoWrapFlags Flags = SCEV::FlagAnyWrap;
// If the increment doesn't overflow, then neither the addrec nor
// the post-increment will overflow.
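+          // For instance (hypothetical IR), a PHI whose back-edge value is
+          // 'add nsw i32 %iv, 1' becomes the recurrence {start,+,1}<nsw>.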
if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
if (OBO->hasNoUnsignedWrap())
- HasNUW = true;
+ Flags = setFlags(Flags, SCEV::FlagNUW);
if (OBO->hasNoSignedWrap())
- HasNSW = true;
+ Flags = setFlags(Flags, SCEV::FlagNSW);
+ } else if (const GEPOperator *GEP =
+ dyn_cast<GEPOperator>(BEValueV)) {
+ // If the increment is an inbounds GEP, then we know the address
+ // space cannot be wrapped around. We cannot make any guarantee
+ // about signed or unsigned overflow because pointers are
+ // unsigned but we may have a negative index from the base
+ // pointer.
+ if (GEP->isInBounds())
+ // FIXME: should be SCEV::FlagNW
+ Flags = setFlags(Flags, SCEV::FlagNSW);
}
const SCEV *StartVal = getSCEV(StartValueV);
- const SCEV *PHISCEV =
- getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
+ const SCEV *PHISCEV = getAddRecExpr(StartVal, Accum, L, Flags);
// Since the no-wrap flags are on the increment, they apply to the
// post-incremented value as well.
- if (Accum->isLoopInvariant(L))
+ if (isLoopInvariant(Accum, L))
(void)getAddRecExpr(getAddExpr(StartVal, Accum),
- Accum, L, HasNUW, HasNSW);
+ Accum, L, Flags);
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
ForgetSymbolicName(PN, SymbolicName);
- Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
return PHISCEV;
}
}
// initial step of the addrec evolution.
if (StartVal == getMinusSCEV(AddRec->getOperand(0),
AddRec->getOperand(1))) {
+ // FIXME: For constant StartVal, we should be able to infer
+ // no-wrap flags.
const SCEV *PHISCEV =
- getAddRecExpr(StartVal, AddRec->getOperand(1), L);
+ getAddRecExpr(StartVal, AddRec->getOperand(1), L,
+ SCEV::FlagAnyWrap);
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
// entries for the scalars that use the symbolic expression.
ForgetSymbolicName(PN, SymbolicName);
- Scalars[SCEVCallbackVH(PN, this)] = PHISCEV;
+ ValueExprMap[SCEVCallbackVH(PN, this)] = PHISCEV;
return PHISCEV;
}
}
// PHI's incoming blocks are in a different loop, in which case doing so
// risks breaking LCSSA form. Instcombine would normally zap these, but
// it doesn't have DominatorTree information, so it may miss cases.
- if (Value *V = PN->hasConstantValue(DT)) {
- bool AllSameLoop = true;
- Loop *PNLoop = LI->getLoopFor(PN->getParent());
- for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
- if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) {
- AllSameLoop = false;
- break;
- }
- if (AllSameLoop)
+ if (Value *V = SimplifyInstruction(PN, TD, DT))
+ if (LI->replacementPreservesLCSSAForm(PN, V))
return getSCEV(V);
- }
// If it's not a loop phi, we can't handle it yet.
return getUnknown(PN);
// Add expression, because the Instruction may be guarded by control flow
// and the no-overflow bits may not be valid for the expression in any
// context.
+ bool isInBounds = GEP->isInBounds();
const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
IndexS = getTruncateOrSignExtend(IndexS, IntPtrTy);
// Multiply the index by the element size to compute the element offset.
- const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize);
+ const SCEV *LocalOffset = getMulExpr(IndexS, ElementSize,
+ isInBounds ? SCEV::FlagNSW :
+ SCEV::FlagAnyWrap);
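+      // E.g. (hypothetical): for 'getelementptr inbounds i32* %p, i64 %i',
+      // LocalOffset is the index multiplied by sizeof(i32) with the nsw flag
+      // set, and the final sum with the base pointer below also carries nsw.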
// Add the element offset to the running total offset.
TotalOffset = getAddExpr(TotalOffset, LocalOffset);
const SCEV *BaseS = getSCEV(Base);
// Add the total offset from all the GEP indices to the base.
- return getAddExpr(BaseS, TotalOffset);
+ return getAddExpr(BaseS, TotalOffset,
+ isInBounds ? SCEV::FlagNSW : SCEV::FlagAnyWrap);
}
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
///
ConstantRange
ScalarEvolution::getUnsignedRange(const SCEV *S) {
+ // See if we've computed this range already.
+ DenseMap<const SCEV *, ConstantRange>::iterator I = UnsignedRanges.find(S);
+ if (I != UnsignedRanges.end())
+ return I->second;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
- return ConstantRange(C->getValue()->getValue());
+ return setUnsignedRange(C, ConstantRange(C->getValue()->getValue()));
unsigned BitWidth = getTypeSizeInBits(S->getType());
ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
ConstantRange X = getUnsignedRange(Add->getOperand(0));
for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
X = X.add(getUnsignedRange(Add->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setUnsignedRange(Add, ConservativeResult.intersectWith(X));
}
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
ConstantRange X = getUnsignedRange(Mul->getOperand(0));
for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setUnsignedRange(Mul, ConservativeResult.intersectWith(X));
}
if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
ConstantRange X = getUnsignedRange(SMax->getOperand(0));
for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
X = X.smax(getUnsignedRange(SMax->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setUnsignedRange(SMax, ConservativeResult.intersectWith(X));
}
if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
ConstantRange X = getUnsignedRange(UMax->getOperand(0));
for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
X = X.umax(getUnsignedRange(UMax->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setUnsignedRange(UMax, ConservativeResult.intersectWith(X));
}
if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
ConstantRange X = getUnsignedRange(UDiv->getLHS());
ConstantRange Y = getUnsignedRange(UDiv->getRHS());
- return ConservativeResult.intersectWith(X.udiv(Y));
+ return setUnsignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
}
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
ConstantRange X = getUnsignedRange(ZExt->getOperand());
- return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
+ return setUnsignedRange(ZExt,
+ ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
}
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
ConstantRange X = getUnsignedRange(SExt->getOperand());
- return ConservativeResult.intersectWith(X.signExtend(BitWidth));
+ return setUnsignedRange(SExt,
+ ConservativeResult.intersectWith(X.signExtend(BitWidth)));
}
if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
ConstantRange X = getUnsignedRange(Trunc->getOperand());
- return ConservativeResult.intersectWith(X.truncate(BitWidth));
+ return setUnsignedRange(Trunc,
+ ConservativeResult.intersectWith(X.truncate(BitWidth)));
}
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
// If there's no unsigned wrap, the value will never be less than its
// initial value.
- if (AddRec->hasNoUnsignedWrap())
+ // FIXME: can broaden to FlagNW?
+ if (AddRec->getNoWrapFlags(SCEV::FlagNUW))
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
if (!C->getValue()->isZero())
ConservativeResult =
ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
ExtEndRange)
- return ConservativeResult;
+ return setUnsignedRange(AddRec, ConservativeResult);
APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
EndRange.getUnsignedMin());
APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
EndRange.getUnsignedMax());
if (Min.isMinValue() && Max.isMaxValue())
- return ConservativeResult;
- return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
+ return setUnsignedRange(AddRec, ConservativeResult);
+ return setUnsignedRange(AddRec,
+ ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
}
}
- return ConservativeResult;
+ return setUnsignedRange(AddRec, ConservativeResult);
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
if (Ones == ~Zeros + 1)
- return ConservativeResult;
- return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
+ return setUnsignedRange(U, ConservativeResult);
+ return setUnsignedRange(U,
+ ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1)));
}
- return ConservativeResult;
+ return setUnsignedRange(S, ConservativeResult);
}
/// getSignedRange - Determine the signed range for a particular SCEV.
///
ConstantRange
ScalarEvolution::getSignedRange(const SCEV *S) {
+ // See if we've computed this range already.
+ DenseMap<const SCEV *, ConstantRange>::iterator I = SignedRanges.find(S);
+ if (I != SignedRanges.end())
+ return I->second;
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
- return ConstantRange(C->getValue()->getValue());
+ return setSignedRange(C, ConstantRange(C->getValue()->getValue()));
unsigned BitWidth = getTypeSizeInBits(S->getType());
ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
ConstantRange X = getSignedRange(Add->getOperand(0));
for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
X = X.add(getSignedRange(Add->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setSignedRange(Add, ConservativeResult.intersectWith(X));
}
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
ConstantRange X = getSignedRange(Mul->getOperand(0));
for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
X = X.multiply(getSignedRange(Mul->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setSignedRange(Mul, ConservativeResult.intersectWith(X));
}
if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
ConstantRange X = getSignedRange(SMax->getOperand(0));
for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
X = X.smax(getSignedRange(SMax->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setSignedRange(SMax, ConservativeResult.intersectWith(X));
}
if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
ConstantRange X = getSignedRange(UMax->getOperand(0));
for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
X = X.umax(getSignedRange(UMax->getOperand(i)));
- return ConservativeResult.intersectWith(X);
+ return setSignedRange(UMax, ConservativeResult.intersectWith(X));
}
if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
ConstantRange X = getSignedRange(UDiv->getLHS());
ConstantRange Y = getSignedRange(UDiv->getRHS());
- return ConservativeResult.intersectWith(X.udiv(Y));
+ return setSignedRange(UDiv, ConservativeResult.intersectWith(X.udiv(Y)));
}
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
ConstantRange X = getSignedRange(ZExt->getOperand());
- return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
+ return setSignedRange(ZExt,
+ ConservativeResult.intersectWith(X.zeroExtend(BitWidth)));
}
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
ConstantRange X = getSignedRange(SExt->getOperand());
- return ConservativeResult.intersectWith(X.signExtend(BitWidth));
+ return setSignedRange(SExt,
+ ConservativeResult.intersectWith(X.signExtend(BitWidth)));
}
if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
ConstantRange X = getSignedRange(Trunc->getOperand());
- return ConservativeResult.intersectWith(X.truncate(BitWidth));
+ return setSignedRange(Trunc,
+ ConservativeResult.intersectWith(X.truncate(BitWidth)));
}
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
// If there's no signed wrap, and all the operands have the same sign or
// zero, the value won't ever change sign.
- if (AddRec->hasNoSignedWrap()) {
+ if (AddRec->getNoWrapFlags(SCEV::FlagNSW)) {
bool AllNonNeg = true;
bool AllNonPos = true;
for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
ExtEndRange)
- return ConservativeResult;
+ return setSignedRange(AddRec, ConservativeResult);
APInt Min = APIntOps::smin(StartRange.getSignedMin(),
EndRange.getSignedMin());
APInt Max = APIntOps::smax(StartRange.getSignedMax(),
EndRange.getSignedMax());
if (Min.isMinSignedValue() && Max.isMaxSignedValue())
- return ConservativeResult;
- return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
+ return setSignedRange(AddRec, ConservativeResult);
+ return setSignedRange(AddRec,
+ ConservativeResult.intersectWith(ConstantRange(Min, Max+1)));
}
}
- return ConservativeResult;
+ return setSignedRange(AddRec, ConservativeResult);
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
if (!U->getValue()->getType()->isIntegerTy() && !TD)
- return ConservativeResult;
+ return setSignedRange(U, ConservativeResult);
unsigned NS = ComputeNumSignBits(U->getValue(), TD);
if (NS == 1)
- return ConservativeResult;
- return ConservativeResult.intersectWith(
+ return setSignedRange(U, ConservativeResult);
+ return setSignedRange(U, ConservativeResult.intersectWith(
ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
- APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
+ APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1)));
}
- return ConservativeResult;
+ return setSignedRange(S, ConservativeResult);
}
/// createSCEV - We know that there is no SCEV for the specified value.
// LLVM IR canonical form means we need only traverse the left operands.
SmallVector<const SCEV *, 4> AddOps;
AddOps.push_back(getSCEV(U->getOperand(1)));
- for (Value *Op = U->getOperand(0);
- Op->getValueID() == Instruction::Add + Value::InstructionVal;
- Op = U->getOperand(0)) {
+ for (Value *Op = U->getOperand(0); ; Op = U->getOperand(0)) {
+ unsigned Opcode = Op->getValueID() - Value::InstructionVal;
+ if (Opcode != Instruction::Add && Opcode != Instruction::Sub)
+ break;
U = cast<Operator>(Op);
- AddOps.push_back(getSCEV(U->getOperand(1)));
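+ // A subtract feeds the add chain by contributing the negation of its right
+ // operand; an add contributes the operand directly.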
+ const SCEV *Op1 = getSCEV(U->getOperand(1));
+ if (Opcode == Instruction::Sub)
+ AddOps.push_back(getNegativeSCEV(Op1));
+ else
+ AddOps.push_back(Op1);
}
AddOps.push_back(getSCEV(U->getOperand(0)));
return getAddExpr(AddOps);
SmallVector<const SCEV *, 4> MulOps;
MulOps.push_back(getSCEV(U->getOperand(1)));
for (Value *Op = U->getOperand(0);
- Op->getValueID() == Instruction::Mul + Value::InstructionVal;
+ Op->getValueID() == Instruction::Mul + Value::InstructionVal;
Op = U->getOperand(0)) {
U = cast<Operator>(Op);
MulOps.push_back(getSCEV(U->getOperand(1)));
// transfer the no-wrap flags, since an or won't introduce a wrap.
if (const SCEVAddRecExpr *NewAR = dyn_cast<SCEVAddRecExpr>(S)) {
const SCEVAddRecExpr *OldAR = cast<SCEVAddRecExpr>(LHS);
- if (OldAR->hasNoUnsignedWrap())
- const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoUnsignedWrap(true);
- if (OldAR->hasNoSignedWrap())
- const_cast<SCEVAddRecExpr *>(NewAR)->setHasNoSignedWrap(true);
+ const_cast<SCEVAddRecExpr *>(NewAR)->setNoWrapFlags(
+ OldAR->getNoWrapFlags());
}
return S;
}
// If C is a single bit, it may be in the sign-bit position
// before the zero-extend. In this case, represent the xor
// using an add, which is equivalent, and re-apply the zext.
- APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
- if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
+ APInt Trunc = CI->getValue().trunc(Z0TySize);
+ if (Trunc.zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
Trunc.isSignBit())
return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
UTy);
// backedge-taken count, which could result in infinite recursion.
std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
- if (Pair.second) {
- BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
- if (BECount.Exact != getCouldNotCompute()) {
- assert(BECount.Exact->isLoopInvariant(L) &&
- BECount.Max->isLoopInvariant(L) &&
- "Computed backedge-taken count isn't loop invariant for loop!");
- ++NumTripCountsComputed;
+ if (!Pair.second)
+ return Pair.first->second;
+
+ BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
+ if (BECount.Exact != getCouldNotCompute()) {
+ assert(isLoopInvariant(BECount.Exact, L) &&
+ isLoopInvariant(BECount.Max, L) &&
+ "Computed backedge-taken count isn't loop invariant for loop!");
+ ++NumTripCountsComputed;
+ // Update the value in the map.
+ Pair.first->second = BECount;
+ } else {
+ if (BECount.Max != getCouldNotCompute())
// Update the value in the map.
Pair.first->second = BECount;
- } else {
- if (BECount.Max != getCouldNotCompute())
- // Update the value in the map.
- Pair.first->second = BECount;
- if (isa<PHINode>(L->getHeader()->begin()))
- // Only count loops that have phi nodes as not being computable.
- ++NumTripCountsNotComputed;
- }
-
- // Now that we know more about the trip count for this loop, forget any
- // existing SCEV values for PHI nodes in this loop since they are only
- // conservative estimates made without the benefit of trip count
- // information. This is similar to the code in forgetLoop, except that
- // it handles SCEVUnknown PHI nodes specially.
- if (BECount.hasAnyInfo()) {
- SmallVector<Instruction *, 16> Worklist;
- PushLoopPHIs(L, Worklist);
-
- SmallPtrSet<Instruction *, 8> Visited;
- while (!Worklist.empty()) {
- Instruction *I = Worklist.pop_back_val();
- if (!Visited.insert(I)) continue;
-
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
- // SCEVUnknown for a PHI either means that it has an unrecognized
- // structure, or it's a PHI that's in the progress of being computed
- // by createNodeForPHI. In the former case, additional loop trip
- // count information isn't going to change anything. In the later
- // case, createNodeForPHI will perform the necessary updates on its
- // own when it gets to that point.
- if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
- ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
- }
- if (PHINode *PN = dyn_cast<PHINode>(I))
- ConstantEvolutionLoopExitValue.erase(PN);
+ if (isa<PHINode>(L->getHeader()->begin()))
+ // Only count loops that have phi nodes as not being computable.
+ ++NumTripCountsNotComputed;
+ }
+
+ // Now that we know more about the trip count for this loop, forget any
+ // existing SCEV values for PHI nodes in this loop since they are only
+ // conservative estimates made without the benefit of trip count
+ // information. This is similar to the code in forgetLoop, except that
+ // it handles SCEVUnknown PHI nodes specially.
+ if (BECount.hasAnyInfo()) {
+ SmallVector<Instruction *, 16> Worklist;
+ PushLoopPHIs(L, Worklist);
+
+ SmallPtrSet<Instruction *, 8> Visited;
+ while (!Worklist.empty()) {
+ Instruction *I = Worklist.pop_back_val();
+ if (!Visited.insert(I)) continue;
+
+ ValueExprMapType::iterator It =
+ ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
+ const SCEV *Old = It->second;
+
+ // SCEVUnknown for a PHI either means that it has an unrecognized
+ // structure, or it's a PHI that's in the process of being computed
+ // by createNodeForPHI. In the former case, additional loop trip
+ // count information isn't going to change anything. In the latter
+ // case, createNodeForPHI will perform the necessary updates on its
+ // own when it gets to that point.
+ if (!isa<PHINode>(I) || !isa<SCEVUnknown>(Old)) {
+ forgetMemoizedResults(Old);
+ ValueExprMap.erase(It);
}
-
- PushDefUseChildren(I, Worklist);
+ if (PHINode *PN = dyn_cast<PHINode>(I))
+ ConstantEvolutionLoopExitValue.erase(PN);
}
+
+ PushDefUseChildren(I, Worklist);
}
}
return Pair.first->second;
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
- ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
+ forgetMemoizedResults(It->second);
+ ValueExprMap.erase(It);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
}
PushDefUseChildren(I, Worklist);
}
+
+ // Forget all contained loops too, to avoid dangling entries in the
+ // ValuesAtScopes map.
+ for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
+ forgetLoop(*I);
}
/// forgetValue - This method should be called by the client when it has
I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV *>::iterator It =
- Scalars.find(static_cast<Value *>(I));
- if (It != Scalars.end()) {
- ValuesAtScopes.erase(It->second);
- Scalars.erase(It);
+ ValueExprMapType::iterator It = ValueExprMap.find(static_cast<Value *>(I));
+ if (It != ValueExprMap.end()) {
+ forgetMemoizedResults(It->second);
+ ValueExprMap.erase(It);
if (PHINode *PN = dyn_cast<PHINode>(I))
ConstantEvolutionLoopExitValue.erase(PN);
}
return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}
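+/// isSimpleUnwrappingAddRec - Return S as an affine add recurrence for loop L
+/// that is known not to self-wrap and has a constant stride, or null if S
+/// does not have that form.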
+static const SCEVAddRecExpr *
+isSimpleUnwrappingAddRec(const SCEV *S, const Loop *L) {
+ const SCEVAddRecExpr *SA = dyn_cast<SCEVAddRecExpr>(S);
+
+ // The SCEV must be an addrec of this loop.
+ if (!SA || SA->getLoop() != L || !SA->isAffine())
+ return 0;
+
+ // The SCEV must be known to not wrap in some way to be interesting.
+ if (!SA->getNoWrapFlags(SCEV::FlagNW))
+ return 0;
+
+ // The stride must be a constant so that we know if it is striding up or down.
+ if (!isa<SCEVConstant>(SA->getOperand(1)))
+ return 0;
+ return SA;
+}
+
+/// getMinusSCEVForExitTest - When considering a loop with an "x != y" exit
+/// test, we turn the exit condition into a computation that evaluates
+/// x-y != 0, and this function returns the expression to use for x-y. We know
+/// and take advantage of the fact that this subtraction is only used in a
+/// comparison-with-zero context.
+///
+/// FIXME: this can be completely removed once AddRec FlagNWs are propagated.
+static const SCEV *getMinusSCEVForExitTest(const SCEV *LHS, const SCEV *RHS,
+ const Loop *L, ScalarEvolution &SE) {
+ // If either LHS or RHS is an AddRec SCEV (of this loop) that is known to not
+ // self-wrap, then we know that the value will either become the other one
+ // (and thus the loop terminates), that the loop will terminate through some
+ // other exit condition first, or that the loop has undefined behavior. This
+ // information is useful when the addrec has a stride that is != 1 or -1,
+ // because it means we can't "miss" the exit value.
+ //
+ // In any of these three cases, it is safe to turn the exit condition into a
+ // "counting down" AddRec (to zero) by subtracting the two inputs as normal,
+ // but since we know that the "end cannot be missed" we can force the
+ // resulting AddRec to be a NUW addrec. Since it is counting down, this means
+ // that the AddRec *cannot* pass zero.
+
+ // See if LHS and RHS are addrec's we can handle.
+ const SCEVAddRecExpr *LHSA = isSimpleUnwrappingAddRec(LHS, L);
+ const SCEVAddRecExpr *RHSA = isSimpleUnwrappingAddRec(RHS, L);
+
+ // If neither addrec is interesting, just return a minus.
+ if (RHSA == 0 && LHSA == 0)
+ return SE.getMinusSCEV(LHS, RHS);
+
+ // If only one of LHS and RHS is an AddRec of this loop, make sure it is LHS.
+ if (RHSA && LHSA == 0) {
+ // Safe because (a-b) != 0 is equivalent to (b-a) != 0.
+ std::swap(LHS, RHS);
+ std::swap(LHSA, RHSA);
+ }
+
+ // Handle the case when only one is advancing in a non-overflowing way.
+ if (RHSA == 0) {
+ // If RHS is loop varying, then we can't predict when LHS will cross it.
+ if (!SE.isLoopInvariant(RHS, L))
+ return SE.getMinusSCEV(LHS, RHS);
+
+ // If LHS has a positive stride, then we compute RHS-LHS, because the loop
+ // is counting up until it crosses RHS (which must be larger than LHS). If
+ // it is negative, we compute LHS-RHS because we're counting down to RHS.
+ const ConstantInt *Stride =
+ cast<SCEVConstant>(LHSA->getOperand(1))->getValue();
+ if (Stride->getValue().isNegative())
+ std::swap(LHS, RHS);
+
+ return SE.getMinusSCEV(RHS, LHS, SCEV::FlagNUW);
+ }
+
+ // If both LHS and RHS are interesting, we have something like:
+ // a+i*4 != b+i*8.
+ const ConstantInt *LHSStride =
+ cast<SCEVConstant>(LHSA->getOperand(1))->getValue();
+ const ConstantInt *RHSStride =
+ cast<SCEVConstant>(RHSA->getOperand(1))->getValue();
+
+ // If the strides are equal, then this is just a (complex) loop invariant
+ // comparison of a and b.
+ if (LHSStride == RHSStride)
+ return SE.getMinusSCEV(LHSA->getStart(), RHSA->getStart());
+
+ // If the signs of the strides differ, then the negative stride is counting
+ // down to the positive stride.
+ if (LHSStride->getValue().isNegative() != RHSStride->getValue().isNegative()){
+ if (RHSStride->getValue().isNegative())
+ std::swap(LHS, RHS);
+ } else {
+ // If LHS's stride is smaller than RHS's stride, then "b" must be less than
+ // "a", and "b" (the RHS) is counting up (catching up) to LHS. This is true
+ // whether the strides are positive or negative.
+ if (RHSStride->getValue().slt(LHSStride->getValue()))
+ std::swap(LHS, RHS);
+ }
+
+ return SE.getMinusSCEV(LHS, RHS, SCEV::FlagNUW);
+}
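+// Illustrative example (hypothetical operands, not part of the patch): for an
+// exit test "{0,+,4}<%loop> != %n" with %n loop-invariant, only the LHS is an
+// interesting addrec and its stride is positive, so the code above returns
+// getMinusSCEV(%n, {0,+,4}<%loop>, SCEV::FlagNUW): a quantity that counts
+// down to zero and cannot miss it.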
+
/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
// At this point, we would like to compute how many iterations of the
// loop the predicate will return true for these inputs.
- if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
+ if (isLoopInvariant(LHS, L) && !isLoopInvariant(RHS, L)) {
// If there is a loop-invariant, force it into the RHS.
std::swap(LHS, RHS);
Cond = ICmpInst::getSwappedPredicate(Cond);
switch (Cond) {
case ICmpInst::ICMP_NE: { // while (X != Y)
// Convert to: while (X-Y != 0)
- BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
+ // FIXME: Once AddRec FlagNWs are propagated, this should be:
+ // BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
+ BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEVForExitTest(LHS, RHS, L,
+ *this), L);
if (BTI.hasAnyInfo()) return BTI;
break;
}
// We can only recognize very limited forms of loop index expressions, in
// particular, only affine AddRec's like {C1,+,C2}.
const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
- if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
+ if (!IdxExpr || !IdxExpr->isAffine() || isLoopInvariant(IdxExpr, L) ||
!isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
!isa<SCEVConstant>(IdxExpr->getOperand(1)))
return getCouldNotCompute();
for (++i; i != e; ++i)
NewOps.push_back(getSCEVAtScope(AddRec->getOperand(i), L));
- AddRec = cast<SCEVAddRecExpr>(getAddRecExpr(NewOps, AddRec->getLoop()));
+ AddRec = cast<SCEVAddRecExpr>(
+ getAddRecExpr(NewOps, AddRec->getLoop(),
+ // FIXME: AddRec->getNoWrapFlags(SCEV::FlagNW)
+ SCEV::FlagAnyWrap));
break;
}
// bit width during computations.
APInt AD = A.lshr(Mult2).zext(BW + 1); // AD = A / D
APInt Mod(BW + 1, 0);
- Mod.set(BW - Mult2); // Mod = N / D
+ Mod.setBit(BW - Mult2); // Mod = N / D
APInt I = AD.multiplicativeInverse(Mod);
// 4. Compute the minimum unsigned root of the equation:
/// HowFarToZero - Return the number of times a backedge comparing the specified
/// value to zero will execute. If not computable, return CouldNotCompute.
+///
+/// This is only used for loops with an "x != y" exit test. The exit condition
+/// is now expressed as a single expression, V = x-y, so the exit test is
+/// effectively V != 0. We know and take advantage of the fact that this
+/// expression is only used in a comparison-with-zero context.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
// If the value is a constant
if (!AddRec || AddRec->getLoop() != L)
return getCouldNotCompute();
- if (AddRec->isAffine()) {
- // If this is an affine expression, the execution count of this branch is
- // the minimum unsigned root of the following equation:
- //
- // Start + Step*N = 0 (mod 2^BW)
- //
- // equivalent to:
- //
- // Step*N = -Start (mod 2^BW)
- //
- // where BW is the common bit width of Start and Step.
-
- // Get the initial value for the loop.
- const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
- L->getParentLoop());
- const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
- L->getParentLoop());
-
- if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
- // For now we handle only constant steps.
-
- // First, handle unitary steps.
- if (StepC->getValue()->equalsInt(1)) // 1*N = -Start (mod 2^BW), so:
- return getNegativeSCEV(Start); // N = -Start (as unsigned)
- if (StepC->getValue()->isAllOnesValue()) // -1*N = -Start (mod 2^BW), so:
- return Start; // N = Start (as unsigned)
-
- // Then, try to solve the above equation provided that Start is constant.
- if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
- return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
- -StartC->getValue()->getValue(),
- *this);
- }
- } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
- // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
- // the quadratic equation to solve it.
- std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
- *this);
+ // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
+ // the quadratic equation to solve it.
+ if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
+ std::pair<const SCEV *,const SCEV *> Roots =
+ SolveQuadraticEquation(AddRec, *this);
const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
- if (R1) {
+ if (R1 && R2) {
#if 0
dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
<< " sol#2: " << *R2 << "\n";
#endif
// Pick the smallest positive root value.
if (ConstantInt *CB =
- dyn_cast<ConstantInt>(ConstantExpr::getICmp(ICmpInst::ICMP_ULT,
- R1->getValue(), R2->getValue()))) {
+ dyn_cast<ConstantInt>(ConstantExpr::getICmp(CmpInst::ICMP_ULT,
+ R1->getValue(),
+ R2->getValue()))) {
if (CB->getZExtValue() == false)
std::swap(R1, R2); // R1 is the minimum root now.
return R1; // We found a quadratic root!
}
}
+ return getCouldNotCompute();
}
+ // Otherwise we can only handle this if it is affine.
+ if (!AddRec->isAffine())
+ return getCouldNotCompute();
+
+ // If this is an affine expression, the execution count of this branch is
+ // the minimum unsigned root of the following equation:
+ //
+ // Start + Step*N = 0 (mod 2^BW)
+ //
+ // equivalent to:
+ //
+ // Step*N = -Start (mod 2^BW)
+ //
+ // where BW is the common bit width of Start and Step.
+
+ // Get the initial value for the loop.
+ const SCEV *Start = getSCEVAtScope(AddRec->getStart(), L->getParentLoop());
+ const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1), L->getParentLoop());
+
+ // For now we handle only constant steps.
+ //
+ // TODO: Handle a nonconstant Step given AddRec<NUW>. If the
+ // AddRec is NUW, then (in an unsigned sense) it cannot be counting up to wrap
+ // to 0; it must be counting down to equal 0. Consequently, N = Start / -Step.
+ // We have not yet seen any such cases.
+ const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step);
+ if (StepC == 0)
+ return getCouldNotCompute();
+
+ // For positive steps (counting up until unsigned overflow):
+ // N = -Start/Step (as unsigned)
+ // For negative steps (counting down to zero):
+ // N = Start/-Step
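+ // For example (illustrative values, not from the patch): {10,+,-2} has a
+ // negative step, so N = Start/-Step = 10/2 = 5 backedges before the
+ // recurrence reaches zero.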
+ // First compute the unsigned distance from zero in the direction of Step.
+ bool CountDown = StepC->getValue()->getValue().isNegative();
+ const SCEV *Distance = CountDown ? Start : getNegativeSCEV(Start);
+
+ // Handle unitary steps, which cannot wraparound.
+ // 1*N = -Start; -1*N = Start (mod 2^BW), so:
+ // N = Distance (as unsigned)
+ if (StepC->getValue()->equalsInt(1) || StepC->getValue()->isAllOnesValue())
+ return Distance;
+
+ // If the recurrence is known not to wraparound, unsigned divide computes the
+ // back edge count. We know that the value will either become zero (and thus
+ // the loop terminates), that the loop will terminate through some other exit
+ // condition first, or that the loop has undefined behavior. This means
+ // we can't "miss" the exit value, even with nonunit stride.
+ //
+ // FIXME: Prove that loops always exhibit *acceptable* undefined
+ // behavior. Loops must exhibit defined behavior until a wrapped value is
+ // actually used. So the trip count computed by udiv could be smaller than the
+ // number of well-defined iterations.
+ if (AddRec->getNoWrapFlags(SCEV::FlagNW))
+ // FIXME: We really want an "isexact" bit for udiv.
+ return getUDivExpr(Distance, CountDown ? getNegativeSCEV(Step) : Step);
+
+ // Then, try to solve the above equation provided that Start is constant.
+ if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
+ return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
+ -StartC->getValue()->getValue(),
+ *this);
return getCouldNotCompute();
}
// as both operands could be addrecs loop-invariant in each other's loop.
if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS)) {
const Loop *L = AR->getLoop();
- if (LHS->isLoopInvariant(L) && LHS->properlyDominates(L->getHeader(), DT)) {
+ if (isLoopInvariant(LHS, L) && properlyDominates(LHS, L->getHeader())) {
std::swap(LHS, RHS);
Pred = ICmpInst::getSwappedPredicate(Pred);
Changed = true;
case ICmpInst::ICMP_SLE:
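+ // Fold "X <=s Y" into a strict comparison: "X <s Y+1" when Y+1 cannot
+ // overflow, or "X-1 <s Y" when X-1 cannot underflow; the adjusted operand is
+ // built with the NSW flag because the range checks rule out wrapping. The
+ // unsigned cases below follow the same pattern with NUW.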
if (!getSignedRange(RHS).getSignedMax().isMaxSignedValue()) {
RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
- /*HasNUW=*/false, /*HasNSW=*/true);
+ SCEV::FlagNSW);
Pred = ICmpInst::ICMP_SLT;
Changed = true;
} else if (!getSignedRange(LHS).getSignedMin().isMinSignedValue()) {
LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
- /*HasNUW=*/false, /*HasNSW=*/true);
+ SCEV::FlagNSW);
Pred = ICmpInst::ICMP_SLT;
Changed = true;
}
case ICmpInst::ICMP_SGE:
if (!getSignedRange(RHS).getSignedMin().isMinSignedValue()) {
RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
- /*HasNUW=*/false, /*HasNSW=*/true);
+ SCEV::FlagNSW);
Pred = ICmpInst::ICMP_SGT;
Changed = true;
} else if (!getSignedRange(LHS).getSignedMax().isMaxSignedValue()) {
LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
- /*HasNUW=*/false, /*HasNSW=*/true);
+ SCEV::FlagNSW);
Pred = ICmpInst::ICMP_SGT;
Changed = true;
}
case ICmpInst::ICMP_ULE:
if (!getUnsignedRange(RHS).getUnsignedMax().isMaxValue()) {
RHS = getAddExpr(getConstant(RHS->getType(), 1, true), RHS,
- /*HasNUW=*/true, /*HasNSW=*/false);
+ SCEV::FlagNUW);
Pred = ICmpInst::ICMP_ULT;
Changed = true;
} else if (!getUnsignedRange(LHS).getUnsignedMin().isMinValue()) {
LHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), LHS,
- /*HasNUW=*/true, /*HasNSW=*/false);
+ SCEV::FlagNUW);
Pred = ICmpInst::ICMP_ULT;
Changed = true;
}
case ICmpInst::ICMP_UGE:
if (!getUnsignedRange(RHS).getUnsignedMin().isMinValue()) {
RHS = getAddExpr(getConstant(RHS->getType(), (uint64_t)-1, true), RHS,
- /*HasNUW=*/true, /*HasNSW=*/false);
+ SCEV::FlagNUW);
Pred = ICmpInst::ICMP_UGT;
Changed = true;
} else if (!getUnsignedRange(LHS).getUnsignedMax().isMaxValue()) {
LHS = getAddExpr(getConstant(RHS->getType(), 1, true), LHS,
- /*HasNUW=*/true, /*HasNSW=*/false);
+ SCEV::FlagNUW);
Pred = ICmpInst::ICMP_UGT;
Changed = true;
}
trivially_true:
// Return 0 == 0.
- LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
+ LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
Pred = ICmpInst::ICMP_EQ;
return true;
trivially_false:
// Return 0 != 0.
- LHS = RHS = getConstant(Type::getInt1Ty(getContext()), 0);
+ LHS = RHS = getConstant(ConstantInt::getFalse(getContext()));
Pred = ICmpInst::ICMP_NE;
return true;
}
"This code doesn't handle negative strides yet!");
const Type *Ty = Start->getType();
+
+ // When Start == End, we have an exact BECount == 0. Short-circuit this case
+ // here because SCEV may not be able to determine that the unsigned division
+ // after rounding is zero.
+ if (Start == End)
+ return getConstant(Ty, 0);
+
const SCEV *NegOne = getConstant(Ty, (uint64_t)-1);
const SCEV *Diff = getMinusSCEV(End, Start);
const SCEV *RoundUp = getAddExpr(Step, NegOne);
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
const Loop *L, bool isSigned) {
// Only handle: "ADDREC < LoopInvariant".
- if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();
+ if (!isLoopInvariant(RHS, L)) return getCouldNotCompute();
const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
if (!AddRec || AddRec->getLoop() != L)
return getCouldNotCompute();
// Check to see if we have a flag which makes analysis easy.
- bool NoWrap = isSigned ? AddRec->hasNoSignedWrap() :
- AddRec->hasNoUnsignedWrap();
+ bool NoWrap = isSigned ? AddRec->getNoWrapFlags(SCEV::FlagNSW) :
+ AddRec->getNoWrapFlags(SCEV::FlagNUW);
if (AddRec->isAffine()) {
unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
// The maximum backedge count is similar, except using the minimum start
// value and the maximum end value.
- const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step, NoWrap);
+ // If we already have an exact constant BECount, use it instead.
+ const SCEV *MaxBECount = isa<SCEVConstant>(BECount) ? BECount
+ : getBECount(MinStart, MaxEnd, Step, NoWrap);
+
+ // If the stride is nonconstant and NoWrap is true, then
+ // getBECount(MinStart, MaxEnd) may fail to compute. That would leave an
+ // exact BECount paired with an invalid MaxBECount, so fall back to the
+ // exact count rather than lose optimization opportunities.
+ if (isa<SCEVCouldNotCompute>(MaxBECount))
+ MaxBECount = BECount;
return BackedgeTakenInfo(BECount, MaxBECount);
}
if (!SC->getValue()->isZero()) {
SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
Operands[0] = SE.getConstant(SC->getType(), 0);
- const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
+ const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop(),
+ // FIXME: getNoWrapFlags(FlagNW)
+ FlagAnyWrap);
if (const SCEVAddRecExpr *ShiftedAddRec =
dyn_cast<SCEVAddRecExpr>(Shifted))
return ShiftedAddRec->getNumIterationsInRange(
// Range.getUpper() is crossed.
SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
- const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());
+ const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop(),
+ // getNoWrapFlags(FlagNW)
+ FlagAnyWrap);
// Next, solve the constructed addrec
std::pair<const SCEV *,const SCEV *> Roots =
assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(getValPtr());
+ SE->ValueExprMap.erase(getValPtr());
// this now dangles!
}
continue;
if (PHINode *PN = dyn_cast<PHINode>(U))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(U);
+ SE->ValueExprMap.erase(U);
for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
UI != UE; ++UI)
Worklist.push_back(*UI);
// Delete the Old value.
if (PHINode *PN = dyn_cast<PHINode>(Old))
SE->ConstantEvolutionLoopExitValue.erase(PN);
- SE->Scalars.erase(Old);
+ SE->ValueExprMap.erase(Old);
// this now dangles!
}
ScalarEvolution::ScalarEvolution()
: FunctionPass(ID), FirstUnknown(0) {
+ initializeScalarEvolutionPass(*PassRegistry::getPassRegistry());
}
bool ScalarEvolution::runOnFunction(Function &F) {
U->~SCEVUnknown();
FirstUnknown = 0;
- Scalars.clear();
+ ValueExprMap.clear();
BackedgeTakenCounts.clear();
ConstantEvolutionLoopExitValue.clear();
ValuesAtScopes.clear();
+ LoopDispositions.clear();
+ BlockDispositions.clear();
+ UnsignedRanges.clear();
+ SignedRanges.clear();
UniqueSCEVs.clear();
SCEVAllocator.Reset();
}
if (L) {
OS << "\t\t" "Exits: ";
const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
- if (!ExitValue->isLoopInvariant(L)) {
+ if (!SE.isLoopInvariant(ExitValue, L)) {
OS << "<<Unknown>>";
} else {
OS << *ExitValue;
PrintLoopInfo(OS, &SE, *I);
}
+ScalarEvolution::LoopDisposition
+ScalarEvolution::getLoopDisposition(const SCEV *S, const Loop *L) {
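+ // Check the cache, inserting a conservative default (LoopVariant) if the
+ // entry is new; the computed disposition overwrites the default below.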
+ std::map<const Loop *, LoopDisposition> &Values = LoopDispositions[S];
+ std::pair<std::map<const Loop *, LoopDisposition>::iterator, bool> Pair =
+ Values.insert(std::make_pair(L, LoopVariant));
+ if (!Pair.second)
+ return Pair.first->second;
+
+ LoopDisposition D = computeLoopDisposition(S, L);
+ return LoopDispositions[S][L] = D;
+}
+
+ScalarEvolution::LoopDisposition
+ScalarEvolution::computeLoopDisposition(const SCEV *S, const Loop *L) {
+ switch (S->getSCEVType()) {
+ case scConstant:
+ return LoopInvariant;
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ return getLoopDisposition(cast<SCEVCastExpr>(S)->getOperand(), L);
+ case scAddRecExpr: {
+ const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
+
+ // If L is the addrec's loop, it's computable.
+ if (AR->getLoop() == L)
+ return LoopComputable;
+
+ // Add recurrences are never invariant in the function-body (null loop).
+ if (!L)
+ return LoopVariant;
+
+ // This recurrence is variant w.r.t. L if L contains AR's loop.
+ if (L->contains(AR->getLoop()))
+ return LoopVariant;
+
+ // This recurrence is invariant w.r.t. L if AR's loop contains L.
+ if (AR->getLoop()->contains(L))
+ return LoopInvariant;
+
+ // This recurrence is variant w.r.t. L if any of its operands
+ // are variant.
+ for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
+ I != E; ++I)
+ if (!isLoopInvariant(*I, L))
+ return LoopVariant;
+
+ // Otherwise it's loop-invariant.
+ return LoopInvariant;
+ }
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+ bool HasVarying = false;
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
+ I != E; ++I) {
+ LoopDisposition D = getLoopDisposition(*I, L);
+ if (D == LoopVariant)
+ return LoopVariant;
+ if (D == LoopComputable)
+ HasVarying = true;
+ }
+ return HasVarying ? LoopComputable : LoopInvariant;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ LoopDisposition LD = getLoopDisposition(UDiv->getLHS(), L);
+ if (LD == LoopVariant)
+ return LoopVariant;
+ LoopDisposition RD = getLoopDisposition(UDiv->getRHS(), L);
+ if (RD == LoopVariant)
+ return LoopVariant;
+ return (LD == LoopInvariant && RD == LoopInvariant) ?
+ LoopInvariant : LoopComputable;
+ }
+ case scUnknown:
+ // All non-instruction values are loop invariant. All instructions are loop
+ // invariant if they are not contained in the specified loop.
+ // Instructions are never considered invariant in the function body
+ // (null loop) because they are defined within the "loop".
+ if (Instruction *I = dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue()))
+ return (L && !L->contains(I)) ? LoopInvariant : LoopVariant;
+ return LoopInvariant;
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ return LoopVariant;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ return LoopVariant;
+}
+
+bool ScalarEvolution::isLoopInvariant(const SCEV *S, const Loop *L) {
+ return getLoopDisposition(S, L) == LoopInvariant;
+}
+
+bool ScalarEvolution::hasComputableLoopEvolution(const SCEV *S, const Loop *L) {
+ return getLoopDisposition(S, L) == LoopComputable;
+}
+
+ScalarEvolution::BlockDisposition
+ScalarEvolution::getBlockDisposition(const SCEV *S, const BasicBlock *BB) {
+ std::map<const BasicBlock *, BlockDisposition> &Values = BlockDispositions[S];
+ std::pair<std::map<const BasicBlock *, BlockDisposition>::iterator, bool>
+ Pair = Values.insert(std::make_pair(BB, DoesNotDominateBlock));
+ if (!Pair.second)
+ return Pair.first->second;
+
+ BlockDisposition D = computeBlockDisposition(S, BB);
+ return BlockDispositions[S][BB] = D;
+}
+
+ScalarEvolution::BlockDisposition
+ScalarEvolution::computeBlockDisposition(const SCEV *S, const BasicBlock *BB) {
+ switch (S->getSCEVType()) {
+ case scConstant:
+ return ProperlyDominatesBlock;
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend:
+ return getBlockDisposition(cast<SCEVCastExpr>(S)->getOperand(), BB);
+ case scAddRecExpr: {
+ // This uses a "dominates" query instead of "properly dominates" query
+ // to test for proper dominance too, because the instruction which
+ // produces the addrec's value is a PHI, and a PHI effectively properly
+ // dominates its entire containing block.
+ const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
+ if (!DT->dominates(AR->getLoop()->getHeader(), BB))
+ return DoesNotDominateBlock;
+ }
+ // FALL THROUGH into SCEVNAryExpr handling.
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+ bool Proper = true;
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
+ I != E; ++I) {
+ BlockDisposition D = getBlockDisposition(*I, BB);
+ if (D == DoesNotDominateBlock)
+ return DoesNotDominateBlock;
+ if (D == DominatesBlock)
+ Proper = false;
+ }
+ return Proper ? ProperlyDominatesBlock : DominatesBlock;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
+ BlockDisposition LD = getBlockDisposition(LHS, BB);
+ if (LD == DoesNotDominateBlock)
+ return DoesNotDominateBlock;
+ BlockDisposition RD = getBlockDisposition(RHS, BB);
+ if (RD == DoesNotDominateBlock)
+ return DoesNotDominateBlock;
+ return (LD == ProperlyDominatesBlock && RD == ProperlyDominatesBlock) ?
+ ProperlyDominatesBlock : DominatesBlock;
+ }
+ case scUnknown:
+ if (Instruction *I =
+ dyn_cast<Instruction>(cast<SCEVUnknown>(S)->getValue())) {
+ if (I->getParent() == BB)
+ return DominatesBlock;
+ if (DT->properlyDominates(I->getParent(), BB))
+ return ProperlyDominatesBlock;
+ return DoesNotDominateBlock;
+ }
+ return ProperlyDominatesBlock;
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ return DoesNotDominateBlock;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ return DoesNotDominateBlock;
+}
+
+bool ScalarEvolution::dominates(const SCEV *S, const BasicBlock *BB) {
+ return getBlockDisposition(S, BB) >= DominatesBlock;
+}
+
+bool ScalarEvolution::properlyDominates(const SCEV *S, const BasicBlock *BB) {
+ return getBlockDisposition(S, BB) == ProperlyDominatesBlock;
+}
+
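+/// hasOperand - Test whether the given SCEV Op appears, directly or
+/// transitively, as an operand of S.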
+bool ScalarEvolution::hasOperand(const SCEV *S, const SCEV *Op) const {
+ switch (S->getSCEVType()) {
+ case scConstant:
+ return false;
+ case scTruncate:
+ case scZeroExtend:
+ case scSignExtend: {
+ const SCEVCastExpr *Cast = cast<SCEVCastExpr>(S);
+ const SCEV *CastOp = Cast->getOperand();
+ return Op == CastOp || hasOperand(CastOp, Op);
+ }
+ case scAddRecExpr:
+ case scAddExpr:
+ case scMulExpr:
+ case scUMaxExpr:
+ case scSMaxExpr: {
+ const SCEVNAryExpr *NAry = cast<SCEVNAryExpr>(S);
+ for (SCEVNAryExpr::op_iterator I = NAry->op_begin(), E = NAry->op_end();
+ I != E; ++I) {
+ const SCEV *NAryOp = *I;
+ if (NAryOp == Op || hasOperand(NAryOp, Op))
+ return true;
+ }
+ return false;
+ }
+ case scUDivExpr: {
+ const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
+ const SCEV *LHS = UDiv->getLHS(), *RHS = UDiv->getRHS();
+ return LHS == Op || hasOperand(LHS, Op) ||
+ RHS == Op || hasOperand(RHS, Op);
+ }
+ case scUnknown:
+ return false;
+ case scCouldNotCompute:
+ llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
+ return false;
+ default: break;
+ }
+ llvm_unreachable("Unknown SCEV kind!");
+ return false;
+}
+
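+/// forgetMemoizedResults - Drop the memoized information (scope values,
+/// loop/block dispositions, and ranges) computed for S.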
+void ScalarEvolution::forgetMemoizedResults(const SCEV *S) {
+ ValuesAtScopes.erase(S);
+ LoopDispositions.erase(S);
+ BlockDispositions.erase(S);
+ UnsignedRanges.erase(S);
+ SignedRanges.erase(S);
+}