#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ConstantRange.h"
+#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
SCEV::~SCEV() {}
void SCEV::dump() const {
- print(errs());
- errs() << '\n';
+ print(dbgs());
+ dbgs() << '\n';
}
bool SCEV::isZero() const {
}
SCEVCouldNotCompute::SCEVCouldNotCompute() :
- SCEV(FoldingSetNodeID(), scCouldNotCompute) {}
+ SCEV(FoldingSetNodeIDRef(), scCouldNotCompute) {}
bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
llvm_unreachable("Attempt to use a SCEVCouldNotCompute object!");
ID.AddPointer(V);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
- new (S) SCEVConstant(ID, V);
+ SCEV *S = new (SCEVAllocator) SCEVConstant(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
- return getConstant(
- ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
+ const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
+ return getConstant(ConstantInt::get(ITy, V, isSigned));
}
const Type *SCEVConstant::getType() const { return V->getType(); }
WriteAsOperand(OS, V, false);
}
-SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
+SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeIDRef ID,
unsigned SCEVTy, const SCEV *op, const Type *ty)
: SCEV(ID, SCEVTy), Op(op), Ty(ty) {}
return Op->properlyDominates(BB, DT);
}
-SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
+SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scTruncate, op, ty) {
- assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate non-integer value!");
}
OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
-SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
+SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scZeroExtend, op, ty) {
- assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot zero extend non-integer value!");
}
OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}
-SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
+SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeIDRef ID,
const SCEV *op, const Type *ty)
: SCEVCastExpr(ID, scSignExtend, op, ty) {
- assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((Op->getType()->isIntegerTy() || Op->getType()->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot sign extend non-integer value!");
}
}
void SCEVCommutativeExpr::print(raw_ostream &OS) const {
- assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
const char *OpStr = getOperationStr();
- OS << "(" << *Operands[0];
- for (unsigned i = 1, e = Operands.size(); i != e; ++i)
- OS << OpStr << *Operands[i];
+ OS << "(";
+ for (op_iterator I = op_begin(), E = op_end(); I != E; ++I) {
+ OS << **I;
+ if (next(I) != E)
+ OS << OpStr;
+ }
OS << ")";
}
return false;
// This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
- if (QueryLoop->contains(L->getHeader()))
+ if (QueryLoop->contains(L))
return false;
// This recurrence is variant w.r.t. QueryLoop if any of its operands
return true;
}
-void SCEVAddRecExpr::print(raw_ostream &OS) const {
- OS << "{" << *Operands[0];
- for (unsigned i = 1, e = Operands.size(); i != e; ++i)
- OS << ",+," << *Operands[i];
- OS << "}<" << L->getHeader()->getName() + ">";
+bool
+SCEVAddRecExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
+ return DT->dominates(L->getHeader(), BB) &&
+ SCEVNAryExpr::dominates(BB, DT);
}
-void SCEVFieldOffsetExpr::print(raw_ostream &OS) const {
- // LLVM struct fields don't have names, so just print the field number.
- OS << "offsetof(" << *STy << ", " << FieldNo << ")";
+bool
+SCEVAddRecExpr::properlyDominates(BasicBlock *BB, DominatorTree *DT) const {
+ // This uses a "dominates" query instead of "properly dominates" query because
+ // the instruction which produces the addrec's value is a PHI, and a PHI
+ // effectively properly dominates its entire containing block.
+ return DT->dominates(L->getHeader(), BB) &&
+ SCEVNAryExpr::properlyDominates(BB, DT);
}
-void SCEVAllocSizeExpr::print(raw_ostream &OS) const {
- OS << "sizeof(" << *AllocTy << ")";
+void SCEVAddRecExpr::print(raw_ostream &OS) const {
+ OS << "{" << *Operands[0];
+ for (unsigned i = 1, e = NumOperands; i != e; ++i)
+ OS << ",+," << *Operands[i];
+ OS << "}<";
+ WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
+ OS << ">";
}
bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
// Instructions are never considered invariant in the function body
// (null loop) because they are defined within the "loop".
if (Instruction *I = dyn_cast<Instruction>(V))
- return L && !L->contains(I->getParent());
+ return L && !L->contains(I);
return true;
}
return V->getType();
}
+bool SCEVUnknown::isSizeOf(const Type *&AllocTy) const {
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (VCE->getOpcode() == Instruction::PtrToInt)
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+ if (CE->getOpcode() == Instruction::GetElementPtr &&
+ CE->getOperand(0)->isNullValue() &&
+ CE->getNumOperands() == 2)
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(1)))
+ if (CI->isOne()) {
+ AllocTy = cast<PointerType>(CE->getOperand(0)->getType())
+ ->getElementType();
+ return true;
+ }
+
+ return false;
+}
+
+bool SCEVUnknown::isAlignOf(const Type *&AllocTy) const {
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (VCE->getOpcode() == Instruction::PtrToInt)
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+ if (CE->getOpcode() == Instruction::GetElementPtr &&
+ CE->getOperand(0)->isNullValue()) {
+ const Type *Ty =
+ cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+ if (const StructType *STy = dyn_cast<StructType>(Ty))
+ if (!STy->isPacked() &&
+ CE->getNumOperands() == 3 &&
+ CE->getOperand(1)->isNullValue()) {
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(CE->getOperand(2)))
+ if (CI->isOne() &&
+ STy->getNumElements() == 2 &&
+ STy->getElementType(0)->isIntegerTy(1)) {
+ AllocTy = STy->getElementType(1);
+ return true;
+ }
+ }
+ }
+
+ return false;
+}
+
+bool SCEVUnknown::isOffsetOf(const Type *&CTy, Constant *&FieldNo) const {
+ if (ConstantExpr *VCE = dyn_cast<ConstantExpr>(V))
+ if (VCE->getOpcode() == Instruction::PtrToInt)
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(VCE->getOperand(0)))
+ if (CE->getOpcode() == Instruction::GetElementPtr &&
+ CE->getNumOperands() == 3 &&
+ CE->getOperand(0)->isNullValue() &&
+ CE->getOperand(1)->isNullValue()) {
+ const Type *Ty =
+ cast<PointerType>(CE->getOperand(0)->getType())->getElementType();
+ // Ignore vector types here so that ScalarEvolutionExpander doesn't
+ // emit getelementptrs that index into vectors.
+ if (Ty->isStructTy() || Ty->isArrayTy()) {
+ CTy = Ty;
+ FieldNo = CE->getOperand(2);
+ return true;
+ }
+ }
+
+ return false;
+}
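+
+// For reference, the idioms matched above are the target-independent
+// constant expressions that ConstantExpr itself produces for these
+// quantities. A sketch of the correspondence (Ty and STy stand for
+// arbitrary sized types):
+//
+//   ConstantExpr::getSizeOf(Ty)         // ptrtoint(gep (Ty*)null, i32 1)
+//   ConstantExpr::getAlignOf(Ty)        // ptrtoint(gep ({i1,Ty}*)null, 0, 1)
+//   ConstantExpr::getOffsetOf(STy, 2)   // ptrtoint(gep (STy*)null, 0, i32 2)
+//
+// SCEVUnknown::print below uses the recognizers to render these forms as
+// sizeof(Ty), alignof(Ty), and offsetof(STy, Field) rather than the raw
+// ptrtoint expressions.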
+
void SCEVUnknown::print(raw_ostream &OS) const {
+ const Type *AllocTy;
+ if (isSizeOf(AllocTy)) {
+ OS << "sizeof(" << *AllocTy << ")";
+ return;
+ }
+ if (isAlignOf(AllocTy)) {
+ OS << "alignof(" << *AllocTy << ")";
+ return;
+ }
+
+ const Type *CTy;
+ Constant *FieldNo;
+ if (isOffsetOf(CTy, FieldNo)) {
+ OS << "offsetof(" << *CTy << ", ";
+ WriteAsOperand(OS, FieldNo, false);
+ OS << ")";
+ return;
+ }
+
+ // Otherwise just print it normally.
WriteAsOperand(OS, V, false);
}
// Order pointer values after integer values. This helps SCEVExpander
// form GEPs.
- if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
+ if (LU->getType()->isPointerTy() && !RU->getType()->isPointerTy())
return false;
- if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
+ if (RU->getType()->isPointerTy() && !LU->getType()->isPointerTy())
return true;
// Compare getValueID values.
return operator()(LC->getOperand(), RC->getOperand());
}
- // Compare offsetof expressions.
- if (const SCEVFieldOffsetExpr *LA = dyn_cast<SCEVFieldOffsetExpr>(LHS)) {
- const SCEVFieldOffsetExpr *RA = cast<SCEVFieldOffsetExpr>(RHS);
- if (CompareTypes(LA->getStructType(), RA->getStructType()) ||
- CompareTypes(RA->getStructType(), LA->getStructType()))
- return CompareTypes(LA->getStructType(), RA->getStructType());
- return LA->getFieldNo() < RA->getFieldNo();
- }
-
- // Compare sizeof expressions by the allocation type.
- if (const SCEVAllocSizeExpr *LA = dyn_cast<SCEVAllocSizeExpr>(LHS)) {
- const SCEVAllocSizeExpr *RA = cast<SCEVAllocSizeExpr>(RHS);
- return CompareTypes(LA->getAllocType(), RA->getAllocType());
- }
-
llvm_unreachable("Unknown SCEV kind!");
return false;
}
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
-/// Note that we go take special precautions to ensure that we get determinstic
+/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine. In other words, we don't want the results of
/// this to depend on where the addresses of various SCEV objects happened to
/// land in memory.
// We need at least W + T bits for the multiplication step
unsigned CalculationBits = W + T;
- // Calcuate 2^T, at width T+W.
+ // Calculate 2^T, at width T+W.
APInt DivFactor = APInt(CalculationBits, 1).shl(T);
// Calculate the multiplicative inverse of K! / 2^T;
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
- new (S) SCEVTruncateExpr(ID, Op, Ty);
+ SCEV *S = new (SCEVAllocator) SCEVTruncateExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
if (MaxBECount == RecastedMaxBECount) {
const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no unsigned overflow.
- const SCEV *ZMul =
- getMulExpr(CastedMaxBECount,
- getTruncateOrZeroExtend(Step, Start->getType()));
+ const SCEV *ZMul = getMulExpr(CastedMaxBECount, Step);
const SCEV *Add = getAddExpr(Start, ZMul);
const SCEV *OperandExtendedAdd =
getAddExpr(getZeroExtendExpr(Start, WideTy),
// Similar to above, only this time treat the step value as signed.
// This covers loops that count down.
- const SCEV *SMul =
- getMulExpr(CastedMaxBECount,
- getTruncateOrSignExtend(Step, Start->getType()));
+ const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
Add = getAddExpr(Start, SMul);
OperandExtendedAdd =
getAddExpr(getZeroExtendExpr(Start, WideTy),
const SCEV *N = getConstant(APInt::getMinValue(BitWidth) -
getUnsignedRange(Step).getUnsignedMax());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_ULT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_ULT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
const SCEV *N = getConstant(APInt::getMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_UGT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_UGT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
- new (S) SCEVZeroExtendExpr(ID, Op, Ty);
+ SCEV *S = new (SCEVAllocator) SCEVZeroExtendExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
if (MaxBECount == RecastedMaxBECount) {
const Type *WideTy = IntegerType::get(getContext(), BitWidth * 2);
// Check whether Start+Step*MaxBECount has no signed overflow.
- const SCEV *SMul =
- getMulExpr(CastedMaxBECount,
- getTruncateOrSignExtend(Step, Start->getType()));
+ const SCEV *SMul = getMulExpr(CastedMaxBECount, Step);
const SCEV *Add = getAddExpr(Start, SMul);
const SCEV *OperandExtendedAdd =
getAddExpr(getSignExtendExpr(Start, WideTy),
// Similar to above, only this time treat the step value as unsigned.
// This covers loops that count up with an unsigned step.
- const SCEV *UMul =
- getMulExpr(CastedMaxBECount,
- getTruncateOrZeroExtend(Step, Start->getType()));
+ const SCEV *UMul = getMulExpr(CastedMaxBECount, Step);
Add = getAddExpr(Start, UMul);
OperandExtendedAdd =
getAddExpr(getSignExtendExpr(Start, WideTy),
const SCEV *N = getConstant(APInt::getSignedMinValue(BitWidth) -
getSignedRange(Step).getSignedMax());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SLT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SLT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
const SCEV *N = getConstant(APInt::getSignedMaxValue(BitWidth) -
getSignedRange(Step).getSignedMin());
if (isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT, AR, N) ||
- (isLoopGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
+ (isLoopEntryGuardedByCond(L, ICmpInst::ICMP_SGT, Start, N) &&
isLoopBackedgeGuardedByCond(L, ICmpInst::ICMP_SGT,
AR->getPostIncExpr(*this), N)))
// Return the expression with the addrec on the outside.
// The cast wasn't folded; create an explicit cast node.
// Recompute the insert position, as it may have been invalidated.
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
- new (S) SCEVSignExtendExpr(ID, Op, Ty);
+ SCEV *S = new (SCEVAllocator) SCEVSignExtendExpr(ID.Intern(SCEVAllocator),
+ Op, Ty);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
if (!isa<SCEVSignExtendExpr>(SExt))
return SExt;
+ // Force the cast to be folded into the operands of an addrec.
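+ // For example, (anyext {A,+,B}<L>) becomes {(anyext A),+,(anyext B)}<L>,
+ // which keeps the recurrence analyzable.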
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op)) {
+ SmallVector<const SCEV *, 4> Ops;
+ for (SCEVAddRecExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
+ I != E; ++I)
+ Ops.push_back(getAnyExtendExpr(*I, Ty));
+ return getAddRecExpr(Ops, AR->getLoop());
+ }
+
// If the expression is obviously signed, use the sext cast value.
if (isa<SCEVSMaxExpr>(Op))
return SExt;
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
SmallVector<const SCEV *, 8> &NewOps,
APInt &AccumulatedConstant,
- const SmallVectorImpl<const SCEV *> &Ops,
+ const SCEV *const *Ops, size_t NumOperands,
const APInt &Scale,
ScalarEvolution &SE) {
bool Interesting = false;
// Iterate over the add operands.
- for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
+ for (unsigned i = 0, e = NumOperands; i != e; ++i) {
const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
APInt NewScale =
Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
// A multiplication of a constant with another add; recurse.
+ const SCEVAddExpr *Add = cast<SCEVAddExpr>(Mul->getOperand(1));
Interesting |=
CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
- cast<SCEVAddExpr>(Mul->getOperand(1))
- ->getOperands(),
+ Add->op_begin(), Add->getNumOperands(),
NewScale, SE);
} else {
// A multiplication of a constant with some other value. Update
}
} else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
// Pull a buried constant out to the outside.
- if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
+ if (Scale != 1 || AccumulatedConstant != 0 || C->getValue()->isZero())
Interesting = true;
AccumulatedConstant += Scale * C->getValue()->getValue();
} else {
"SCEVAddExpr operand types don't match!");
#endif
+ // If HasNSW is true and all the operands are non-negative, infer HasNUW.
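+ // (All operands being non-negative means the no-signed-wrap sum is the
+ // exact sum; a sum that fits the non-negative signed range cannot wrap
+ // the unsigned range either.)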
+ if (!HasNUW && HasNSW) {
+ bool All = true;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ if (!isKnownNonNegative(Ops[i])) {
+ All = false;
+ break;
+ }
+ if (All) HasNUW = true;
+ }
+
// Sort by complexity, this groups all similar expression types together.
GroupByComplexity(Ops, LI);
}
// If we are left with a constant zero being added, strip it off.
- if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
+ if (LHSC->getValue()->isZero()) {
Ops.erase(Ops.begin());
--Idx;
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
// Okay, check to see if the same value occurs in the operand list twice. If
// so, merge them together into a multiply expression. Since we sorted the
}
LargeOps.push_back(T->getOperand());
} else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
- // This could be either sign or zero extension, but sign extension
- // is much more likely to be foldable here.
- LargeOps.push_back(getSignExtendExpr(C, SrcType));
+ LargeOps.push_back(getAnyExtendExpr(C, SrcType));
} else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
SmallVector<const SCEV *, 8> LargeMulOps;
for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
LargeMulOps.push_back(T->getOperand());
} else if (const SCEVConstant *C =
dyn_cast<SCEVConstant>(M->getOperand(j))) {
- // This could be either sign or zero extension, but sign extension
- // is much more likely to be foldable here.
- LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
+ LargeMulOps.push_back(getAnyExtendExpr(C, SrcType));
} else {
Ok = false;
break;
// If we deleted at least one add, we added operands to the end of the list,
// and they are not necessarily sorted. Recurse to resort and resimplify
- // any operands we just aquired.
+ // any operands we just acquired.
if (DeletedAdd)
return getAddExpr(Ops);
}
SmallVector<const SCEV *, 8> NewOps;
APInt AccumulatedConstant(BitWidth, 0);
if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
- Ops, APInt(BitWidth, 1), *this)) {
+ Ops.data(), Ops.size(),
+ APInt(BitWidth, 1), *this)) {
// Some interesting folding opportunity is present, so it's worthwhile to
// re-generate the operands list. Group the operands by constant scale,
// to avoid multiplying by the same constant scale multiple times.
// they are loop invariant w.r.t. the recurrence.
SmallVector<const SCEV *, 8> LIOps;
const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
+ const Loop *AddRecLoop = AddRec->getLoop();
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
- if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
+ if (Ops[i]->isLoopInvariant(AddRecLoop)) {
LIOps.push_back(Ops[i]);
Ops.erase(Ops.begin()+i);
--i; --e;
LIOps.push_back(AddRec->getStart());
SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
- AddRec->op_end());
+ AddRec->op_end());
AddRecOps[0] = getAddExpr(LIOps);
- const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
+ // It's tempting to propagate NUW/NSW flags here, but nuw/nsw addition
+ // is not associative so this isn't necessarily safe.
+ const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRecLoop);
+
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
if (OtherIdx != Idx) {
const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
- if (AddRec->getLoop() == OtherAddRec->getLoop()) {
+ if (AddRecLoop == OtherAddRec->getLoop()) {
// Other + {A,+,B} + {C,+,D} --> Other + {A+C,+,B+D}
SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
AddRec->op_end());
}
NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
}
- const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());
+ const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRecLoop);
if (Ops.size() == 2) return NewAddRec;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEVAddExpr *S = SCEVAllocator.Allocate<SCEVAddExpr>();
- new (S) SCEVAddExpr(ID, Ops);
- UniqueSCEVs.InsertNode(S, IP);
+ SCEVAddExpr *S =
+ static_cast<SCEVAddExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+ if (!S) {
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ S = new (SCEVAllocator) SCEVAddExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
+ UniqueSCEVs.InsertNode(S, IP);
+ }
if (HasNUW) S->setHasNoUnsignedWrap(true);
if (HasNSW) S->setHasNoSignedWrap(true);
return S;
}
-
/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops,
bool HasNUW, bool HasNSW) {
assert(!Ops.empty() && "Cannot get empty mul!");
+ if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
for (unsigned i = 1, e = Ops.size(); i != e; ++i)
assert(getEffectiveSCEVType(Ops[i]->getType()) ==
"SCEVMulExpr operand types don't match!");
#endif
+ // If HasNSW is true and all the operands are non-negative, infer HasNUW.
+ if (!HasNUW && HasNSW) {
+ bool All = true;
+ for (unsigned i = 0, e = Ops.size(); i != e; ++i)
+ if (!isKnownNonNegative(Ops[i])) {
+ All = false;
+ break;
+ }
+ if (All) HasNUW = true;
+ }
+
// Sort by complexity, this groups all similar expression types together.
GroupByComplexity(Ops, LI);
return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
getMulExpr(LHSC, Add->getOperand(1)));
-
++Idx;
while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
// We found two constants, fold them together!
} else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
// If we have a multiply of zero, it will always be zero.
return Ops[0];
+ } else if (Ops[0]->isAllOnesValue()) {
+ // If we have a mul by -1 of an add, try distributing the -1 among the
+ // add operands.
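+ // For example, (-1 * (%x + 3)) becomes ((-1 * %x) + -3), exposing the
+ // constant term to further folding.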
+ if (Ops.size() == 2)
+ if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1])) {
+ SmallVector<const SCEV *, 4> NewOps;
+ bool AnyFolded = false;
+ for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
+ I != E; ++I) {
+ const SCEV *Mul = getMulExpr(Ops[0], *I);
+ if (!isa<SCEVMulExpr>(Mul)) AnyFolded = true;
+ NewOps.push_back(Mul);
+ }
+ if (AnyFolded)
+ return getAddExpr(NewOps);
+ }
}
+
+ if (Ops.size() == 1)
+ return Ops[0];
}
// Skip over the add expression until we get to a multiply.
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
++Idx;
- if (Ops.size() == 1)
- return Ops[0];
-
// If there are mul operands inline them all into this expression.
if (Idx < Ops.size()) {
bool DeletedMul = false;
// If we deleted at least one mul, we added operands to the end of the list,
// and they are not necessarily sorted. Recurse to resort and resimplify
- // any operands we just aquired.
+ // any operands we just acquired.
if (DeletedMul)
return getMulExpr(Ops);
}
}
}
- const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());
+ // It's tempting to propagate the NSW flag here, but nsw multiplication
+ // is not associative so this isn't necessarily safe.
+ const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop(),
+ HasNUW && AddRec->hasNoUnsignedWrap(),
+ /*HasNSW=*/false);
// If all of the other operands were loop invariant, we are done.
if (Ops.size() == 1) return NewRec;
for (unsigned i = 0, e = Ops.size(); i != e; ++i)
ID.AddPointer(Ops[i]);
void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEVMulExpr *S = SCEVAllocator.Allocate<SCEVMulExpr>();
- new (S) SCEVMulExpr(ID, Ops);
- UniqueSCEVs.InsertNode(S, IP);
+ SCEVMulExpr *S =
+ static_cast<SCEVMulExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+ if (!S) {
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ S = new (SCEVAllocator) SCEVMulExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
+ UniqueSCEVs.InsertNode(S, IP);
+ }
if (HasNUW) S->setHasNoUnsignedWrap(true);
if (HasNSW) S->setHasNoSignedWrap(true);
return S;
if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
if (RHSC->getValue()->equalsInt(1))
return LHS; // X udiv 1 --> x
- if (RHSC->isZero())
- return getIntegerSCEV(0, LHS->getType()); // value is undefined
-
- // Determine if the division can be folded into the operands of
- // its operands.
- // TODO: Generalize this to non-constants by using known-bits information.
- const Type *Ty = LHS->getType();
- unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
- unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
- // For non-power-of-two values, effectively round the value up to the
- // nearest power of two.
- if (!RHSC->getValue()->getValue().isPowerOf2())
- ++MaxShiftAmt;
- const IntegerType *ExtTy =
- IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
- // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
- if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
- if (const SCEVConstant *Step =
- dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
- if (!Step->getValue()->getValue()
- .urem(RHSC->getValue()->getValue()) &&
- getZeroExtendExpr(AR, ExtTy) ==
- getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
- getZeroExtendExpr(Step, ExtTy),
- AR->getLoop())) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
- Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
- return getAddRecExpr(Operands, AR->getLoop());
- }
- // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
- if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
- Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
- if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
- // Find an operand that's safely divisible.
- for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
- const SCEV *Op = M->getOperand(i);
- const SCEV *Div = getUDivExpr(Op, RHSC);
- if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
- const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
- Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
- MOperands.end());
- Operands[i] = Div;
- return getMulExpr(Operands);
+ // If the denominator is zero, the result of the udiv is undefined. Don't
+ // try to analyze it, because the resolution chosen here may differ from
+ // the resolution chosen in other parts of the compiler.
+ if (!RHSC->getValue()->isZero()) {
+ // Determine if the division can be folded into the operands of
+ // its operands.
+ // TODO: Generalize this to non-constants by using known-bits information.
+ const Type *Ty = LHS->getType();
+ unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
+ unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
+ // For non-power-of-two values, effectively round the value up to the
+ // nearest power of two.
+ if (!RHSC->getValue()->getValue().isPowerOf2())
+ ++MaxShiftAmt;
+ const IntegerType *ExtTy =
+ IntegerType::get(getContext(), getTypeSizeInBits(Ty) + MaxShiftAmt);
+ // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
+ if (const SCEVConstant *Step =
+ dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
+ if (!Step->getValue()->getValue()
+ .urem(RHSC->getValue()->getValue()) &&
+ getZeroExtendExpr(AR, ExtTy) ==
+ getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
+ getZeroExtendExpr(Step, ExtTy),
+ AR->getLoop())) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
+ Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
+ return getAddRecExpr(Operands, AR->getLoop());
}
+ // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
+ if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
+ Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
+ if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
+ // Find an operand that's safely divisible.
+ for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
+ const SCEV *Op = M->getOperand(i);
+ const SCEV *Div = getUDivExpr(Op, RHSC);
+ if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
+ Operands = SmallVector<const SCEV *, 4>(M->op_begin(),
+ M->op_end());
+ Operands[i] = Div;
+ return getMulExpr(Operands);
+ }
+ }
+ }
+ // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
+ if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
+ SmallVector<const SCEV *, 4> Operands;
+ for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
+ Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
+ if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
+ Operands.clear();
+ for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
+ const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
+ if (isa<SCEVUDivExpr>(Op) ||
+ getMulExpr(Op, RHS) != A->getOperand(i))
+ break;
+ Operands.push_back(Op);
+ }
+ if (Operands.size() == A->getNumOperands())
+ return getAddExpr(Operands);
}
- }
- // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
- if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
- SmallVector<const SCEV *, 4> Operands;
- for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
- Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
- if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
- Operands.clear();
- for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
- const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
- if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
- break;
- Operands.push_back(Op);
- }
- if (Operands.size() == A->getNumOperands())
- return getAddExpr(Operands);
}
- }
- // Fold if both operands are constant.
- if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
- Constant *LHSCV = LHSC->getValue();
- Constant *RHSCV = RHSC->getValue();
- return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
- RHSCV)));
+ // Fold if both operands are constant.
+ if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
+ Constant *LHSCV = LHSC->getValue();
+ Constant *RHSCV = RHSC->getValue();
+ return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
+ RHSCV)));
+ }
}
}
ID.AddPointer(RHS);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
- new (S) SCEVUDivExpr(ID, LHS, RHS);
+ SCEV *S = new (SCEVAllocator) SCEVUDivExpr(ID.Intern(SCEVAllocator),
+ LHS, RHS);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
return getAddRecExpr(Operands, L, HasNUW, HasNSW); // {X,+,0} --> X
}
+ // It's tempting to call getMaxBackedgeTakenCount here and
+ // use that information to infer NUW and NSW flags. However, computing a
+ // BE count requires calling getAddRecExpr, so we may not yet have a
+ // meaningful BE count at this point (and if we don't, we'd be stuck
+ // with a SCEVCouldNotCompute as the cached BE count).
+
+ // If HasNSW is true and all the operands are non-negative, infer HasNUW.
+ if (!HasNUW && HasNSW) {
+ bool All = true;
+ for (unsigned i = 0, e = Operands.size(); i != e; ++i)
+ if (!isKnownNonNegative(Operands[i])) {
+ All = false;
+ break;
+ }
+ if (All) HasNUW = true;
+ }
+
// Canonicalize nested AddRecs by nesting them in order of loop depth.
if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
- const Loop* NestedLoop = NestedAR->getLoop();
- if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
+ const Loop *NestedLoop = NestedAR->getLoop();
+ if (L->contains(NestedLoop->getHeader()) ?
+ (L->getLoopDepth() < NestedLoop->getLoopDepth()) :
+ (!NestedLoop->contains(L->getHeader()) &&
+ DT->dominates(L->getHeader(), NestedLoop->getHeader()))) {
SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
- NestedAR->op_end());
+ NestedAR->op_end());
Operands[0] = NestedAR->getStart();
// AddRecs require their operands be loop-invariant with respect to their
// loops. Don't perform this transformation if it would break this
}
}
+ // Okay, it looks like we really DO need an addrec expr. Check to see if we
+ // already have one, otherwise create a new one.
FoldingSetNodeID ID;
ID.AddInteger(scAddRecExpr);
ID.AddInteger(Operands.size());
ID.AddPointer(Operands[i]);
ID.AddPointer(L);
void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEVAddRecExpr *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
- new (S) SCEVAddRecExpr(ID, Operands, L);
- UniqueSCEVs.InsertNode(S, IP);
+ SCEVAddRecExpr *S =
+ static_cast<SCEVAddRecExpr *>(UniqueSCEVs.FindNodeOrInsertPos(ID, IP));
+ if (!S) {
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Operands.size());
+ std::uninitialized_copy(Operands.begin(), Operands.end(), O);
+ S = new (SCEVAllocator) SCEVAddRecExpr(ID.Intern(SCEVAllocator),
+ O, Operands.size(), L);
+ UniqueSCEVs.InsertNode(S, IP);
+ }
if (HasNUW) S->setHasNoUnsignedWrap(true);
if (HasNSW) S->setHasNoSignedWrap(true);
return S;
// maximum-int.
return Ops[0];
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
// Find the first SMax
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
// so, delete one. Since we sorted the list, these values are required to
// be adjacent.
for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
- if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y
+ // X smax Y smax Y --> X smax Y
+ // X smax Y --> X, if X is always greater than Y
+ if (Ops[i] == Ops[i+1] ||
+ isKnownPredicate(ICmpInst::ICMP_SGE, Ops[i], Ops[i+1])) {
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
+ --i; --e;
+ } else if (isKnownPredicate(ICmpInst::ICMP_SLE, Ops[i], Ops[i+1])) {
Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
--i; --e;
}
ID.AddPointer(Ops[i]);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
- new (S) SCEVSMaxExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ SCEV *S = new (SCEVAllocator) SCEVSMaxExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
return S;
}
// maximum-int.
return Ops[0];
}
- }
- if (Ops.size() == 1) return Ops[0];
+ if (Ops.size() == 1) return Ops[0];
+ }
// Find the first UMax
while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
// so, delete one. Since we sorted the list, these values are required to
// be adjacent.
for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
- if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y
+ // X umax Y umax Y --> X umax Y
+ // X umax Y --> X, if X is always greater than Y
+ if (Ops[i] == Ops[i+1] ||
+ isKnownPredicate(ICmpInst::ICMP_UGE, Ops[i], Ops[i+1])) {
+ Ops.erase(Ops.begin()+i+1, Ops.begin()+i+2);
+ --i; --e;
+ } else if (isKnownPredicate(ICmpInst::ICMP_ULE, Ops[i], Ops[i+1])) {
Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
--i; --e;
}
ID.AddPointer(Ops[i]);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
- new (S) SCEVUMaxExpr(ID, Ops);
+ const SCEV **O = SCEVAllocator.Allocate<const SCEV *>(Ops.size());
+ std::uninitialized_copy(Ops.begin(), Ops.end(), O);
+ SCEV *S = new (SCEVAllocator) SCEVUMaxExpr(ID.Intern(SCEVAllocator),
+ O, Ops.size());
UniqueSCEVs.InsertNode(S, IP);
return S;
}
return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
}
-const SCEV *ScalarEvolution::getFieldOffsetExpr(const StructType *STy,
- unsigned FieldNo) {
- // If we have TargetData we can determine the constant offset.
- if (TD) {
- const Type *IntPtrTy = TD->getIntPtrType(getContext());
- const StructLayout &SL = *TD->getStructLayout(STy);
- uint64_t Offset = SL.getElementOffset(FieldNo);
- return getIntegerSCEV(Offset, IntPtrTy);
- }
-
- // Field 0 is always at offset 0.
- if (FieldNo == 0) {
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
- return getIntegerSCEV(0, Ty);
- }
+const SCEV *ScalarEvolution::getSizeOfExpr(const Type *AllocTy) {
+ // If we have TargetData, we can bypass creating a target-independent
+ // constant expression and then folding it back into a ConstantInt.
+ // This is just a compile-time optimization.
+ if (TD)
+ return getConstant(TD->getIntPtrType(getContext()),
+ TD->getTypeAllocSize(AllocTy));
- // Okay, it looks like we really DO need an offsetof expr. Check to see if we
- // already have one, otherwise create a new one.
- FoldingSetNodeID ID;
- ID.AddInteger(scFieldOffset);
- ID.AddPointer(STy);
- ID.AddInteger(FieldNo);
- void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVFieldOffsetExpr>();
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
- new (S) SCEVFieldOffsetExpr(ID, Ty, STy, FieldNo);
- UniqueSCEVs.InsertNode(S, IP);
- return S;
+ Constant *C = ConstantExpr::getSizeOf(AllocTy);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(CE, TD);
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
-const SCEV *ScalarEvolution::getAllocSizeExpr(const Type *AllocTy) {
- // If we have TargetData we can determine the constant size.
- if (TD && AllocTy->isSized()) {
- const Type *IntPtrTy = TD->getIntPtrType(getContext());
- return getIntegerSCEV(TD->getTypeAllocSize(AllocTy), IntPtrTy);
- }
+const SCEV *ScalarEvolution::getAlignOfExpr(const Type *AllocTy) {
+ Constant *C = ConstantExpr::getAlignOf(AllocTy);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(CE, TD);
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
+ return getTruncateOrZeroExtend(getSCEV(C), Ty);
+}
- // Expand an array size into the element size times the number
- // of elements.
- if (const ArrayType *ATy = dyn_cast<ArrayType>(AllocTy)) {
- const SCEV *E = getAllocSizeExpr(ATy->getElementType());
- return getMulExpr(
- E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
- ATy->getNumElements())));
- }
+const SCEV *ScalarEvolution::getOffsetOfExpr(const StructType *STy,
+ unsigned FieldNo) {
+ // If we have TargetData, we can bypass creating a target-independent
+ // constant expression and then folding it back into a ConstantInt.
+ // This is just a compile-time optimization.
+ if (TD)
+ return getConstant(TD->getIntPtrType(getContext()),
+ TD->getStructLayout(STy)->getElementOffset(FieldNo));
- // Expand a vector size into the element size times the number
- // of elements.
- if (const VectorType *VTy = dyn_cast<VectorType>(AllocTy)) {
- const SCEV *E = getAllocSizeExpr(VTy->getElementType());
- return getMulExpr(
- E, getConstant(ConstantInt::get(cast<IntegerType>(E->getType()),
- VTy->getNumElements())));
- }
+ Constant *C = ConstantExpr::getOffsetOf(STy, FieldNo);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(CE, TD);
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(STy));
+ return getTruncateOrZeroExtend(getSCEV(C), Ty);
+}
- // Okay, it looks like we really DO need a sizeof expr. Check to see if we
- // already have one, otherwise create a new one.
- FoldingSetNodeID ID;
- ID.AddInteger(scAllocSize);
- ID.AddPointer(AllocTy);
- void *IP = 0;
- if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVAllocSizeExpr>();
- const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(AllocTy));
- new (S) SCEVAllocSizeExpr(ID, Ty, AllocTy);
- UniqueSCEVs.InsertNode(S, IP);
- return S;
+const SCEV *ScalarEvolution::getOffsetOfExpr(const Type *CTy,
+ Constant *FieldNo) {
+ Constant *C = ConstantExpr::getOffsetOf(CTy, FieldNo);
+ if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C))
+ C = ConstantFoldConstantExpression(CE, TD);
+ const Type *Ty = getEffectiveSCEVType(PointerType::getUnqual(CTy));
+ return getTruncateOrZeroExtend(getSCEV(C), Ty);
}
const SCEV *ScalarEvolution::getUnknown(Value *V) {
ID.AddPointer(V);
void *IP = 0;
if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
- SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
- new (S) SCEVUnknown(ID, V);
+ SCEV *S = new (SCEVAllocator) SCEVUnknown(ID.Intern(SCEVAllocator), V);
UniqueSCEVs.InsertNode(S, IP);
return S;
}
/// has access to target-specific information.
bool ScalarEvolution::isSCEVable(const Type *Ty) const {
// Integers and pointers are always SCEVable.
- return Ty->isInteger() || isa<PointerType>(Ty);
+ return Ty->isIntegerTy() || Ty->isPointerTy();
}
/// getTypeSizeInBits - Return the size in bits of the specified type,
return TD->getTypeSizeInBits(Ty);
// Integer types have fixed sizes.
- if (Ty->isInteger())
+ if (Ty->isIntegerTy())
return Ty->getPrimitiveSizeInBits();
// The only other supported type is pointer. Without TargetData, conservatively
// assume pointers are 64-bit.
- assert(isa<PointerType>(Ty) && "isSCEVable permitted a non-SCEVable type!");
+ assert(Ty->isPointerTy() && "isSCEVable permitted a non-SCEVable type!");
return 64;
}
const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
assert(isSCEVable(Ty) && "Type is not SCEVable!");
- if (Ty->isInteger())
+ if (Ty->isIntegerTy())
return Ty;
// The only other supported type is pointer.
- assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
+ assert(Ty->isPointerTy() && "Unexpected non-pointer non-integer type!");
if (TD) return TD->getIntPtrType(getContext());
// Without TargetData, conservatively assume pointers are 64-bit.
/// getIntegerSCEV - Given a SCEVable type, create a constant for the
/// specified signed integer value and return a SCEV for the constant.
-const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
+const SCEV *ScalarEvolution::getIntegerSCEV(int64_t Val, const Type *Ty) {
const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
return getConstant(ConstantInt::get(ITy, Val));
}
ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or zero extend with non-integer arguments!");
if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
return V; // No conversion
const SCEV *
ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or zero extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrZeroExtend cannot truncate!");
const SCEV *
ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or sign extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrSignExtend cannot truncate!");
const SCEV *
ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot noop or any extend with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
"getNoopOrAnyExtend cannot truncate!");
const SCEV *
ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
const Type *SrcTy = V->getType();
- assert((SrcTy->isInteger() || isa<PointerType>(SrcTy)) &&
- (Ty->isInteger() || isa<PointerType>(Ty)) &&
+ assert((SrcTy->isIntegerTy() || SrcTy->isPointerTy()) &&
+ (Ty->isIntegerTy() || Ty->isPointerTy()) &&
"Cannot truncate or noop with non-integer arguments!");
assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
"getTruncateOrNoop cannot extend!");
/// the Scalars map if they reference SymName. This is used during PHI
/// resolution.
void
-ScalarEvolution::ForgetSymbolicName(Instruction *I, const SCEV *SymName) {
+ScalarEvolution::ForgetSymbolicName(Instruction *PN, const SCEV *SymName) {
SmallVector<Instruction *, 16> Worklist;
- PushDefUseChildren(I, Worklist);
+ PushDefUseChildren(PN, Worklist);
SmallPtrSet<Instruction *, 8> Visited;
- Visited.insert(I);
+ Visited.insert(PN);
while (!Worklist.empty()) {
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV*>::iterator It =
+ std::map<SCEVCallbackVH, const SCEV *>::iterator It =
Scalars.find(static_cast<Value *>(I));
if (It != Scalars.end()) {
// Short-circuit the def-use traversal if the symbolic name
// ceases to appear in expressions.
- if (!It->second->hasOperand(SymName))
+ if (It->second != SymName && !It->second->hasOperand(SymName))
continue;
// SCEVUnknown for a PHI either means that it has an unrecognized
- // structure, or it's a PHI that's in the progress of being computed
- // by createNodeForPHI. In the former case, additional loop trip
- // count information isn't going to change anything. In the later
- // case, createNodeForPHI will perform the necessary updates on its
- // own when it gets to that point.
- if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second)) {
+ // structure, it's a PHI that's in the process of being computed
+ // by createNodeForPHI, or it's a single-value PHI. In the first case,
+ // additional loop trip count information isn't going to change anything.
+ // In the second case, createNodeForPHI will perform the necessary
+ // updates on its own when it gets to that point. In the third, we do
+ // want to forget the SCEVUnknown.
+ if (!isa<PHINode>(I) ||
+ !isa<SCEVUnknown>(It->second) ||
+ (I != PN && It->second == SymName)) {
ValuesAtScopes.erase(It->second);
Scalars.erase(It);
}
/// a loop header, making it a potential recurrence, or it doesn't.
///
const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
- if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized.
- if (const Loop *L = LI->getLoopFor(PN->getParent()))
- if (L->getHeader() == PN->getParent()) {
- // If it lives in the loop header, it has two incoming values, one
- // from outside the loop, and one from inside.
- unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
- unsigned BackEdge = IncomingEdge^1;
-
+ if (const Loop *L = LI->getLoopFor(PN->getParent()))
+ if (L->getHeader() == PN->getParent()) {
+ // The loop may have multiple entrances or multiple exits; we can analyze
+ // this phi as an addrec if it has a unique entry value and a unique
+ // backedge value.
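+ // For example, a header phi [ %init, %preheader ], [ %next, %latch1 ],
+ // [ %next, %latch2 ] still has the unique backedge value %next.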
+ Value *BEValueV = 0, *StartValueV = 0;
+ for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
+ Value *V = PN->getIncomingValue(i);
+ if (L->contains(PN->getIncomingBlock(i))) {
+ if (!BEValueV) {
+ BEValueV = V;
+ } else if (BEValueV != V) {
+ BEValueV = 0;
+ break;
+ }
+ } else if (!StartValueV) {
+ StartValueV = V;
+ } else if (StartValueV != V) {
+ StartValueV = 0;
+ break;
+ }
+ }
+ if (BEValueV && StartValueV) {
// While we are analyzing this PHI node, handle its value symbolically.
const SCEV *SymbolicName = getUnknown(PN);
assert(Scalars.find(PN) == Scalars.end() &&
// Using this symbolic name for the PHI, analyze the value coming around
// the back-edge.
- Value *BEValueV = PN->getIncomingValue(BackEdge);
const SCEV *BEValue = getSCEV(BEValueV);
// NOTE: If BEValue is loop invariant, we know that the PHI node just
if (Accum->isLoopInvariant(L) ||
(isa<SCEVAddRecExpr>(Accum) &&
cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
- const SCEV *StartVal =
- getSCEV(PN->getIncomingValue(IncomingEdge));
- const SCEVAddRecExpr *PHISCEV =
- cast<SCEVAddRecExpr>(getAddRecExpr(StartVal, Accum, L));
-
- // If the increment doesn't overflow, then neither the addrec nor the
- // post-increment will overflow.
- if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV))
- if (OBO->getOperand(0) == PN &&
- getSCEV(OBO->getOperand(1)) ==
- PHISCEV->getStepRecurrence(*this)) {
- const SCEVAddRecExpr *PostInc = PHISCEV->getPostIncExpr(*this);
- if (OBO->hasNoUnsignedWrap()) {
- const_cast<SCEVAddRecExpr *>(PHISCEV)
- ->setHasNoUnsignedWrap(true);
- const_cast<SCEVAddRecExpr *>(PostInc)
- ->setHasNoUnsignedWrap(true);
- }
- if (OBO->hasNoSignedWrap()) {
- const_cast<SCEVAddRecExpr *>(PHISCEV)
- ->setHasNoSignedWrap(true);
- const_cast<SCEVAddRecExpr *>(PostInc)
- ->setHasNoSignedWrap(true);
- }
- }
+ bool HasNUW = false;
+ bool HasNSW = false;
+
+ // If the increment doesn't overflow, then neither the addrec nor
+ // the post-increment will overflow.
+ if (const AddOperator *OBO = dyn_cast<AddOperator>(BEValueV)) {
+ if (OBO->hasNoUnsignedWrap())
+ HasNUW = true;
+ if (OBO->hasNoSignedWrap())
+ HasNSW = true;
+ }
+
+ const SCEV *StartVal = getSCEV(StartValueV);
+ const SCEV *PHISCEV =
+ getAddRecExpr(StartVal, Accum, L, HasNUW, HasNSW);
+
+ // Since the no-wrap flags are on the increment, they apply to the
+ // post-incremented value as well.
+ if (Accum->isLoopInvariant(L))
+ (void)getAddRecExpr(getAddExpr(StartVal, Accum),
+ Accum, L, HasNUW, HasNSW);
// Okay, for the entire analysis of this edge we assumed the PHI
// to be symbolic. We now need to go back and purge all of the
// Because the other in-value of i (0) fits the evolution of BEValue
// i really is an addrec evolution.
if (AddRec->getLoop() == L && AddRec->isAffine()) {
- const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
+ const SCEV *StartVal = getSCEV(StartValueV);
// If StartVal = j.start - j.stride, we can use StartVal as the
// initial step of the addrec evolution.
if (StartVal == getMinusSCEV(AddRec->getOperand(0),
- AddRec->getOperand(1))) {
+ AddRec->getOperand(1))) {
const SCEV *PHISCEV =
getAddRecExpr(StartVal, AddRec->getOperand(1), L);
}
}
}
-
- return SymbolicName;
}
+ }
- // It's tempting to recognize PHIs with a unique incoming value, however
- // this leads passes like indvars to break LCSSA form. Fortunately, such
- // PHIs are rare, as instcombine zaps them.
+ // If the PHI has a single incoming value, follow that value, unless the
+ // PHI's incoming blocks are in a different loop, in which case doing so
+ // risks breaking LCSSA form. Instcombine would normally zap these, but
+ // it doesn't have DominatorTree information, so it may miss cases.
+ if (Value *V = PN->hasConstantValue(DT)) {
+ bool AllSameLoop = true;
+ Loop *PNLoop = LI->getLoopFor(PN->getParent());
+ for (size_t i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
+ if (LI->getLoopFor(PN->getIncomingBlock(i)) != PNLoop) {
+ AllSameLoop = false;
+ break;
+ }
+ if (AllSameLoop)
+ return getSCEV(V);
+ }
// If it's not a loop phi, we can't handle it yet.
return getUnknown(PN);
/// createNodeForGEP - Expand GEP instructions into add and multiply
/// operations. This allows them to be analyzed by regular SCEV code.
///
-const SCEV *ScalarEvolution::createNodeForGEP(Operator *GEP) {
+const SCEV *ScalarEvolution::createNodeForGEP(GEPOperator *GEP) {
+ bool InBounds = GEP->isInBounds();
const Type *IntPtrTy = getEffectiveSCEVType(GEP->getType());
Value *Base = GEP->getOperand(0);
// Don't attempt to analyze GEPs over unsized objects.
// For a struct, add the member offset.
unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
TotalOffset = getAddExpr(TotalOffset,
- getFieldOffsetExpr(STy, FieldNo));
+ getOffsetOfExpr(STy, FieldNo),
+ /*HasNUW=*/false, /*HasNSW=*/InBounds);
} else {
// For an array, add the element offset, explicitly scaled.
const SCEV *LocalOffset = getSCEV(Index);
- if (!isa<PointerType>(LocalOffset->getType()))
- // Getelementptr indicies are signed.
- LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
- LocalOffset = getMulExpr(LocalOffset, getAllocSizeExpr(*GTI));
- TotalOffset = getAddExpr(TotalOffset, LocalOffset);
+ // Getelementptr indices are signed.
+ LocalOffset = getTruncateOrSignExtend(LocalOffset, IntPtrTy);
+ // Lower "inbounds" GEPs to NSW arithmetic.
+ LocalOffset = getMulExpr(LocalOffset, getSizeOfExpr(*GTI),
+ /*HasNUW=*/false, /*HasNSW=*/InBounds);
+ TotalOffset = getAddExpr(TotalOffset, LocalOffset,
+ /*HasNUW=*/false, /*HasNSW=*/InBounds);
}
}
- return getAddExpr(getSCEV(Base), TotalOffset);
+ return getAddExpr(getSCEV(Base), TotalOffset,
+ /*HasNUW=*/false, /*HasNSW=*/InBounds);
}
/// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
return ConstantRange(C->getValue()->getValue());
+ unsigned BitWidth = getTypeSizeInBits(S->getType());
+ ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
+
+ // If the value has known zeros, the maximum unsigned value will have those
+ // known zeros as well.
+ uint32_t TZ = GetMinTrailingZeros(S);
+ if (TZ != 0)
+ ConservativeResult =
+ ConstantRange(APInt::getMinValue(BitWidth),
+ APInt::getMaxValue(BitWidth).lshr(TZ).shl(TZ) + 1);
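+ // For example, with a bit width of 8 and two known trailing zeros, the
+ // largest possible value is 0b11111100 = 252, giving the range [0, 253).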
+
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
ConstantRange X = getUnsignedRange(Add->getOperand(0));
for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
X = X.add(getUnsignedRange(Add->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
ConstantRange X = getUnsignedRange(Mul->getOperand(0));
for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
X = X.multiply(getUnsignedRange(Mul->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
ConstantRange X = getUnsignedRange(SMax->getOperand(0));
for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
X = X.smax(getUnsignedRange(SMax->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
ConstantRange X = getUnsignedRange(UMax->getOperand(0));
for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
X = X.umax(getUnsignedRange(UMax->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
ConstantRange X = getUnsignedRange(UDiv->getLHS());
ConstantRange Y = getUnsignedRange(UDiv->getRHS());
- return X.udiv(Y);
+ return ConservativeResult.intersectWith(X.udiv(Y));
}
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
ConstantRange X = getUnsignedRange(ZExt->getOperand());
- return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
}
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
ConstantRange X = getUnsignedRange(SExt->getOperand());
- return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.signExtend(BitWidth));
}
if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
ConstantRange X = getUnsignedRange(Trunc->getOperand());
- return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.truncate(BitWidth));
}
- ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
-
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
- const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
- const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
- if (!Trip) return FullSet;
+ // If there's no unsigned wrap, the value will never be less than its
+ // initial value.
+ if (AddRec->hasNoUnsignedWrap())
+ if (const SCEVConstant *C = dyn_cast<SCEVConstant>(AddRec->getStart()))
+ if (!C->getValue()->isZero())
+ ConservativeResult =
+ ConstantRange(C->getValue()->getValue(), APInt(BitWidth, 0));
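+ // For example (hedged): {5,+,1}<nuw> over i8 can never drop below its
+ // start, so the wrapped range [5, 0) -- i.e. the values 5..255 -- is
+ // recorded as the conservative result.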
// TODO: non-affine addrec
if (AddRec->isAffine()) {
const Type *Ty = AddRec->getType();
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
- if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
+ if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
+ getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
const SCEV *Start = AddRec->getStart();
const SCEV *Step = AddRec->getStepRecurrence(*this);
- const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
-
- // Check for overflow.
- // TODO: This is very conservative.
- if (!(Step->isOne() &&
- isKnownPredicate(ICmpInst::ICMP_ULT, Start, End)) &&
- !(Step->isAllOnesValue() &&
- isKnownPredicate(ICmpInst::ICMP_UGT, Start, End)))
- return FullSet;
ConstantRange StartRange = getUnsignedRange(Start);
- ConstantRange EndRange = getUnsignedRange(End);
+ ConstantRange StepRange = getSignedRange(Step);
+ ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
+ ConstantRange EndRange =
+ StartRange.add(MaxBECountRange.multiply(StepRange));
+
+ // Check for overflow. This must be done with ConstantRange arithmetic
+ // because we could be called from within the ScalarEvolution overflow
+ // checking code.
+ ConstantRange ExtStartRange = StartRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtMaxBECountRange =
+ MaxBECountRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtEndRange = EndRange.zextOrTrunc(BitWidth*2+1);
+ if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
+ ExtEndRange)
+ return ConservativeResult;
+
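+ // Why BitWidth*2+1 bits suffice (a sketch): MaxBECountRange * StepRange
+ // needs at most 2*BitWidth bits, and adding StartRange needs one more,
+ // so the extended computation is exact. If it disagrees with the
+ // truncated EndRange, the narrow arithmetic wrapped and EndRange can't
+ // be trusted.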
APInt Min = APIntOps::umin(StartRange.getUnsignedMin(),
EndRange.getUnsignedMin());
APInt Max = APIntOps::umax(StartRange.getUnsignedMax(),
EndRange.getUnsignedMax());
if (Min.isMinValue() && Max.isMaxValue())
- return FullSet;
- return ConstantRange(Min, Max+1);
+ return ConservativeResult;
+ return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
}
}
+
+ return ConservativeResult;
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
- unsigned BitWidth = getTypeSizeInBits(U->getType());
APInt Mask = APInt::getAllOnesValue(BitWidth);
APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
if (Ones == ~Zeros + 1)
- return FullSet;
- return ConstantRange(Ones, ~Zeros + 1);
+ return ConservativeResult;
+ return ConservativeResult.intersectWith(ConstantRange(Ones, ~Zeros + 1));
}
- return FullSet;
+ return ConservativeResult;
}
/// getSignedRange - Determine the signed range for a particular SCEV.
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
return ConstantRange(C->getValue()->getValue());
+ unsigned BitWidth = getTypeSizeInBits(S->getType());
+ ConstantRange ConservativeResult(BitWidth, /*isFullSet=*/true);
+
+ // If the value has known zeros, the maximum signed value will have those
+ // known zeros as well.
+ uint32_t TZ = GetMinTrailingZeros(S);
+ if (TZ != 0)
+ ConservativeResult =
+ ConstantRange(APInt::getSignedMinValue(BitWidth),
+ APInt::getSignedMaxValue(BitWidth).ashr(TZ).shl(TZ) + 1);
+
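+ // Worked example (assuming the APInt calls above): with BitWidth == 8 and
+ // TZ == 2, getSignedMaxValue(8).ashr(2).shl(2) == 124, so the
+ // conservative signed range becomes [-128, 125), whose largest multiple
+ // of 4 is 124.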
if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
ConstantRange X = getSignedRange(Add->getOperand(0));
for (unsigned i = 1, e = Add->getNumOperands(); i != e; ++i)
X = X.add(getSignedRange(Add->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
ConstantRange X = getSignedRange(Mul->getOperand(0));
for (unsigned i = 1, e = Mul->getNumOperands(); i != e; ++i)
X = X.multiply(getSignedRange(Mul->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(S)) {
ConstantRange X = getSignedRange(SMax->getOperand(0));
for (unsigned i = 1, e = SMax->getNumOperands(); i != e; ++i)
X = X.smax(getSignedRange(SMax->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(S)) {
ConstantRange X = getSignedRange(UMax->getOperand(0));
for (unsigned i = 1, e = UMax->getNumOperands(); i != e; ++i)
X = X.umax(getSignedRange(UMax->getOperand(i)));
- return X;
+ return ConservativeResult.intersectWith(X);
}
if (const SCEVUDivExpr *UDiv = dyn_cast<SCEVUDivExpr>(S)) {
ConstantRange X = getSignedRange(UDiv->getLHS());
ConstantRange Y = getSignedRange(UDiv->getRHS());
- return X.udiv(Y);
+ return ConservativeResult.intersectWith(X.udiv(Y));
}
if (const SCEVZeroExtendExpr *ZExt = dyn_cast<SCEVZeroExtendExpr>(S)) {
ConstantRange X = getSignedRange(ZExt->getOperand());
- return X.zeroExtend(cast<IntegerType>(ZExt->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.zeroExtend(BitWidth));
}
if (const SCEVSignExtendExpr *SExt = dyn_cast<SCEVSignExtendExpr>(S)) {
ConstantRange X = getSignedRange(SExt->getOperand());
- return X.signExtend(cast<IntegerType>(SExt->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.signExtend(BitWidth));
}
if (const SCEVTruncateExpr *Trunc = dyn_cast<SCEVTruncateExpr>(S)) {
ConstantRange X = getSignedRange(Trunc->getOperand());
- return X.truncate(cast<IntegerType>(Trunc->getType())->getBitWidth());
+ return ConservativeResult.intersectWith(X.truncate(BitWidth));
}
- ConstantRange FullSet(getTypeSizeInBits(S->getType()), true);
-
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(S)) {
- const SCEV *T = getBackedgeTakenCount(AddRec->getLoop());
- const SCEVConstant *Trip = dyn_cast<SCEVConstant>(T);
- if (!Trip) return FullSet;
+ // If there's no signed wrap, and all the operands have the same sign or
+ // zero, the value won't ever change sign.
+ if (AddRec->hasNoSignedWrap()) {
+ bool AllNonNeg = true;
+ bool AllNonPos = true;
+ for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
+ if (!isKnownNonNegative(AddRec->getOperand(i))) AllNonNeg = false;
+ if (!isKnownNonPositive(AddRec->getOperand(i))) AllNonPos = false;
+ }
+ if (AllNonNeg)
+ ConservativeResult = ConservativeResult.intersectWith(
+ ConstantRange(APInt(BitWidth, 0),
+ APInt::getSignedMinValue(BitWidth)));
+ else if (AllNonPos)
+ ConservativeResult = ConservativeResult.intersectWith(
+ ConstantRange(APInt::getSignedMinValue(BitWidth),
+ APInt(BitWidth, 1)));
+ }
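+ // For instance (hedged): {1,+,2}<nsw> has every operand known
+ // non-negative, so its range is intersected with the non-negative
+ // half-space [0, SignedMin) and the recurrence is known never to go
+ // negative.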
// TODO: non-affine addrec
if (AddRec->isAffine()) {
const Type *Ty = AddRec->getType();
const SCEV *MaxBECount = getMaxBackedgeTakenCount(AddRec->getLoop());
- if (getTypeSizeInBits(MaxBECount->getType()) <= getTypeSizeInBits(Ty)) {
+ if (!isa<SCEVCouldNotCompute>(MaxBECount) &&
+ getTypeSizeInBits(MaxBECount->getType()) <= BitWidth) {
MaxBECount = getNoopOrZeroExtend(MaxBECount, Ty);
const SCEV *Start = AddRec->getStart();
const SCEV *Step = AddRec->getStepRecurrence(*this);
- const SCEV *End = AddRec->evaluateAtIteration(MaxBECount, *this);
-
- // Check for overflow.
- // TODO: This is very conservative.
- if (!(Step->isOne() &&
- isKnownPredicate(ICmpInst::ICMP_SLT, Start, End)) &&
- !(Step->isAllOnesValue() &&
- isKnownPredicate(ICmpInst::ICMP_SGT, Start, End)))
- return FullSet;
ConstantRange StartRange = getSignedRange(Start);
- ConstantRange EndRange = getSignedRange(End);
+ ConstantRange StepRange = getSignedRange(Step);
+ ConstantRange MaxBECountRange = getUnsignedRange(MaxBECount);
+ ConstantRange EndRange =
+ StartRange.add(MaxBECountRange.multiply(StepRange));
+
+ // Check for overflow. This must be done with ConstantRange arithmetic
+ // because we could be called from within the ScalarEvolution overflow
+ // checking code.
+ ConstantRange ExtStartRange = StartRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtStepRange = StepRange.sextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtMaxBECountRange =
+ MaxBECountRange.zextOrTrunc(BitWidth*2+1);
+ ConstantRange ExtEndRange = EndRange.sextOrTrunc(BitWidth*2+1);
+ if (ExtStartRange.add(ExtMaxBECountRange.multiply(ExtStepRange)) !=
+ ExtEndRange)
+ return ConservativeResult;
+
APInt Min = APIntOps::smin(StartRange.getSignedMin(),
EndRange.getSignedMin());
APInt Max = APIntOps::smax(StartRange.getSignedMax(),
EndRange.getSignedMax());
if (Min.isMinSignedValue() && Max.isMaxSignedValue())
- return FullSet;
- return ConstantRange(Min, Max+1);
+ return ConservativeResult;
+ return ConservativeResult.intersectWith(ConstantRange(Min, Max+1));
}
}
+
+ return ConservativeResult;
}
if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
// For a SCEVUnknown, ask ValueTracking.
- unsigned BitWidth = getTypeSizeInBits(U->getType());
+ if (!U->getValue()->getType()->isIntegerTy() && !TD)
+ return ConservativeResult;
unsigned NS = ComputeNumSignBits(U->getValue(), TD);
if (NS == 1)
- return FullSet;
- return
+ return ConservativeResult;
+ return ConservativeResult.intersectWith(
ConstantRange(APInt::getSignedMinValue(BitWidth).ashr(NS - 1),
- APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1);
+ APInt::getSignedMaxValue(BitWidth).ashr(NS - 1)+1));
}
- return FullSet;
+ return ConservativeResult;
}
/// createSCEV - We know that there is no SCEV for the specified value.
return getUnknown(V);
unsigned Opcode = Instruction::UserOp1;
- if (Instruction *I = dyn_cast<Instruction>(V))
+ if (Instruction *I = dyn_cast<Instruction>(V)) {
Opcode = I->getOpcode();
- else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
+
+ // Don't attempt to analyze instructions in blocks that aren't
+ // reachable. Such instructions don't matter, and they aren't required
+ // to obey the basic rules for definitions dominating uses, on which
+ // this analysis depends.
+ if (!DT->isReachableFromEntry(I->getParent()))
+ return getUnknown(V);
+ } else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
Opcode = CE->getOpcode();
else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
return getConstant(CI);
else if (isa<ConstantPointerNull>(V))
return getIntegerSCEV(0, V->getType());
- else if (isa<UndefValue>(V))
- return getIntegerSCEV(0, V->getType());
else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
return GA->mayBeOverridden() ? getUnknown(V) : getSCEV(GA->getAliasee());
else
const Type *Z0Ty = Z0->getType();
unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
- // If C is a low-bits mask, the zero extend is zerving to
+ // If C is a low-bits mask, the zero extend is serving to
// mask off the high bits. Complement the operand and
// re-apply the zext.
if (APIntOps::isMask(Z0TySize, CI->getValue()))
case Instruction::Shl:
// Turn shift left of a constant amount into a multiply.
if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
- uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+ uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (SA->getValue().uge(BitWidth))
+ break;
+
Constant *X = ConstantInt::get(getContext(),
- APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
+ APInt(BitWidth, 1).shl(SA->getZExtValue()));
return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
}
break;
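// A hedged example: "shl i32 %x, 3" becomes (8 * %x), while
// "shl i32 %x, 33" has a shift count >= the bit width, so it falls
// through and is ultimately treated as an unanalyzable unknown.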
case Instruction::LShr:
// Turn logical shift right of a constant into an unsigned divide.
if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
- uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
+ uint32_t BitWidth = cast<IntegerType>(U->getType())->getBitWidth();
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (SA->getValue().uge(BitWidth))
+ break;
+
Constant *X = ConstantInt::get(getContext(),
- APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
+ APInt(BitWidth, 1).shl(SA->getZExtValue()));
return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
}
break;
case Instruction::AShr:
// For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
- if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
+ if (Operator *L = dyn_cast<Operator>(U->getOperand(0)))
if (L->getOpcode() == Instruction::Shl &&
L->getOperand(1) == U->getOperand(1)) {
- unsigned BitWidth = getTypeSizeInBits(U->getType());
+ uint64_t BitWidth = getTypeSizeInBits(U->getType());
+
+ // If the shift count is not less than the bitwidth, the result of
+ // the shift is undefined. Don't try to analyze it, because the
+ // resolution chosen here may differ from the resolution chosen in
+ // other parts of the compiler.
+ if (CI->getValue().uge(BitWidth))
+ break;
+
uint64_t Amt = BitWidth - CI->getZExtValue();
if (Amt == BitWidth)
return getSCEV(L->getOperand(0)); // shift by zero --> noop
- if (Amt > BitWidth)
- return getIntegerSCEV(0, U->getType()); // value is undefined
return
getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
- IntegerType::get(getContext(), Amt)),
- U->getType());
+ IntegerType::get(getContext(),
+ Amt)),
+ U->getType());
}
break;
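// E.g. (hypothetical IR): "ashr (shl %x, 24), 24" on i32 gives
// Amt == 32 - 24 == 8, so the shift pair is modeled as
// sext(trunc %x to i8) to i32.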
return getSCEV(U->getOperand(0));
break;
- // It's tempting to handle inttoptr and ptrtoint, however this can
- // lead to pointer expressions which cannot be expanded to GEPs
- // (because they may overflow). For now, the only pointer-typed
- // expressions we handle are GEPs and address literals.
+ // It's tempting to handle inttoptr and ptrtoint as no-ops; however, this
+ // can lead to pointer expressions which cannot safely be expanded to GEPs,
+ // because ScalarEvolution doesn't respect the GEP aliasing rules when
+ // simplifying integer expressions.
case Instruction::GetElementPtr:
- return createNodeForGEP(U);
+ return createNodeForGEP(cast<GEPOperator>(U));
case Instruction::PHI:
return createNodeForPHI(cast<PHINode>(U));
const ScalarEvolution::BackedgeTakenInfo &
ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
// Initially insert a CouldNotCompute for this loop. If the insertion
- // succeeds, procede to actually compute a backedge-taken count and
+ // succeeds, proceed to actually compute a backedge-taken count and
// update the value. The temporary CouldNotCompute value tells SCEV
// code elsewhere that it shouldn't attempt to request a new
// backedge-taken count, which could result in infinite recursion.
- std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
+ std::pair<std::map<const Loop *, BackedgeTakenInfo>::iterator, bool> Pair =
BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
if (Pair.second) {
- BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
- if (ItCount.Exact != getCouldNotCompute()) {
- assert(ItCount.Exact->isLoopInvariant(L) &&
- ItCount.Max->isLoopInvariant(L) &&
- "Computed trip count isn't loop invariant for loop!");
+ BackedgeTakenInfo BECount = ComputeBackedgeTakenCount(L);
+ if (BECount.Exact != getCouldNotCompute()) {
+ assert(BECount.Exact->isLoopInvariant(L) &&
+ BECount.Max->isLoopInvariant(L) &&
+ "Computed backedge-taken count isn't loop invariant for loop!");
++NumTripCountsComputed;
// Update the value in the map.
- Pair.first->second = ItCount;
+ Pair.first->second = BECount;
} else {
- if (ItCount.Max != getCouldNotCompute())
+ if (BECount.Max != getCouldNotCompute())
// Update the value in the map.
- Pair.first->second = ItCount;
+ Pair.first->second = BECount;
if (isa<PHINode>(L->getHeader()->begin()))
// Only count loops that have phi nodes as not being computable.
++NumTripCountsNotComputed;
// conservative estimates made without the benefit of trip count
// information. This is similar to the code in forgetLoop, except that
// it handles SCEVUnknown PHI nodes specially.
- if (ItCount.hasAnyInfo()) {
+ if (BECount.hasAnyInfo()) {
SmallVector<Instruction *, 16> Worklist;
PushLoopPHIs(L, Worklist);
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV*>::iterator It =
+ std::map<SCEVCallbackVH, const SCEV *>::iterator It =
Scalars.find(static_cast<Value *>(I));
if (It != Scalars.end()) {
// SCEVUnknown for a PHI either means that it has an unrecognized
Instruction *I = Worklist.pop_back_val();
if (!Visited.insert(I)) continue;
- std::map<SCEVCallbackVH, const SCEV*>::iterator It =
+ std::map<SCEVCallbackVH, const SCEV *>::iterator It =
+ Scalars.find(static_cast<Value *>(I));
+ if (It != Scalars.end()) {
+ ValuesAtScopes.erase(It->second);
+ Scalars.erase(It);
+ if (PHINode *PN = dyn_cast<PHINode>(I))
+ ConstantEvolutionLoopExitValue.erase(PN);
+ }
+
+ PushDefUseChildren(I, Worklist);
+ }
+}
+
+/// forgetValue - This method should be called by the client when it has
+/// changed a value in a way that may affect its value, or which may
+/// disconnect it from a def-use chain linking it to a loop.
+void ScalarEvolution::forgetValue(Value *V) {
+ Instruction *I = dyn_cast<Instruction>(V);
+ if (!I) return;
+
+ // Drop information about expressions based on loop-header PHIs.
+ SmallVector<Instruction *, 16> Worklist;
+ Worklist.push_back(I);
+
+ SmallPtrSet<Instruction *, 8> Visited;
+ while (!Worklist.empty()) {
+ I = Worklist.pop_back_val();
+ if (!Visited.insert(I)) continue;
+
+ std::map<SCEVCallbackVH, const SCEV *>::iterator It =
Scalars.find(static_cast<Value *>(I));
if (It != Scalars.end()) {
ValuesAtScopes.erase(It->second);
/// of the specified loop will execute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
- SmallVector<BasicBlock*, 8> ExitingBlocks;
+ SmallVector<BasicBlock *, 8> ExitingBlocks;
L->getExitingBlocks(ExitingBlocks);
// Examine all exits and pick the most conservative values.
return getCouldNotCompute();
}
- // Procede to the next level to examine the exit condition expression.
+ // Proceed to the next level to examine the exit condition expression.
return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
ExitBr->getSuccessor(0),
ExitBr->getSuccessor(1));
}
// With an icmp, it may be feasible to compute an exact backedge-taken count.
- // Procede to the next level to examine the icmp.
+ // Proceed to the next level to examine the icmp.
if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);
+ // Check for a constant condition. These are normally stripped out by
+ // SimplifyCFG, but ScalarEvolution may be used by a pass which wishes to
+ // preserve the CFG and is temporarily leaving constant conditions
+ // in place.
+ if (ConstantInt *CI = dyn_cast<ConstantInt>(ExitCond)) {
+ if (L->contains(FBB) == !CI->getZExtValue())
+ // The backedge is always taken.
+ return getCouldNotCompute();
+ else
+ // The backedge is never taken.
+ return getIntegerSCEV(0, CI->getType());
+ }
+
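+ // A hedged example: for "br i1 true, label %body, label %exit" with
+ // %body inside the loop, the exit edge is never taken, so no finite
+ // count exists; with the successors swapped, the backedge is never
+ // taken and the count is the constant zero.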
// If it's not an integer or pointer comparison then compute it the hard way.
return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}
// Handle common loops like: for (X = "string"; *X; ++X)
if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
- const SCEV *ItCnt =
+ BackedgeTakenInfo ItCnt =
ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
- if (!isa<SCEVCouldNotCompute>(ItCnt)) {
- unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
- return BackedgeTakenInfo(ItCnt,
- isa<SCEVConstant>(ItCnt) ? ItCnt :
- getConstant(APInt::getMaxValue(BitWidth)-1));
- }
+ if (ItCnt.hasAnyInfo())
+ return ItCnt;
}
const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
switch (Cond) {
case ICmpInst::ICMP_NE: { // while (X != Y)
// Convert to: while (X-Y != 0)
- const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
- if (!isa<SCEVCouldNotCompute>(TC)) return TC;
+ BackedgeTakenInfo BTI = HowFarToZero(getMinusSCEV(LHS, RHS), L);
+ if (BTI.hasAnyInfo()) return BTI;
break;
}
case ICmpInst::ICMP_EQ: { // while (X == Y)
// Convert to: while (X-Y == 0)
- const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
- if (!isa<SCEVCouldNotCompute>(TC)) return TC;
+ BackedgeTakenInfo BTI = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
+ if (BTI.hasAnyInfo()) return BTI;
break;
}
case ICmpInst::ICMP_SLT: {
}
default:
#if 0
- errs() << "ComputeBackedgeTakenCount ";
+ dbgs() << "ComputeBackedgeTakenCount ";
if (ExitCond->getOperand(0)->getType()->isUnsigned())
- errs() << "[unsigned] ";
- errs() << *LHS << " "
+ dbgs() << "[unsigned] ";
+ dbgs() << *LHS << " "
<< Instruction::getOpcodeName(Instruction::ICmp)
<< " " << *RHS << "\n";
#endif
/// the addressed element of the initializer or null if the index expression is
/// invalid.
static Constant *
-GetAddressedElementFromGlobal(LLVMContext &Context, GlobalVariable *GV,
+GetAddressedElementFromGlobal(GlobalVariable *GV,
const std::vector<ConstantInt*> &Indices) {
Constant *Init = GV->getInitializer();
for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
/// 'icmp op load X, cst', try to see if we can compute the backedge
/// execution count.
-const SCEV *
+ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
LoadInst *LI,
Constant *RHS,
if (LI->isVolatile()) return getCouldNotCompute();
// Check to see if the loaded pointer is a getelementptr of a global.
+ // TODO: Use SCEV instead of manually grubbing with GEPs.
GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
if (!GEP) return getCouldNotCompute();
// Form the GEP offset.
Indexes[VarIdxNum] = Val;
- Constant *Result = GetAddressedElementFromGlobal(getContext(), GV, Indexes);
+ Constant *Result = GetAddressedElementFromGlobal(GV, Indexes);
if (Result == 0) break; // Cannot compute!
// Evaluate the condition for this iteration.
if (!isa<ConstantInt>(Result)) break; // Couldn't decide for sure
if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
#if 0
- errs() << "\n***\n*** Computed loop count " << *ItCst
+ dbgs() << "\n***\n*** Computed loop count " << *ItCst
<< "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
<< "***\n";
#endif
// If this is not an instruction, or if this is an instruction outside of the
// loop, it can't be derived from a loop PHI.
Instruction *I = dyn_cast<Instruction>(V);
- if (I == 0 || !L->contains(I->getParent())) return 0;
+ if (I == 0 || !L->contains(I)) return 0;
if (PHINode *PN = dyn_cast<PHINode>(I)) {
if (L->getHeader() == I->getParent())
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal. If we can't fold this expression for some
/// reason, return null.
-static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
+static Constant *EvaluateExpression(Value *V, Constant *PHIVal,
+ const TargetData *TD) {
if (isa<PHINode>(V)) return PHIVal;
if (Constant *C = dyn_cast<Constant>(V)) return C;
if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
Instruction *I = cast<Instruction>(V);
- LLVMContext &Context = I->getParent()->getContext();
std::vector<Constant*> Operands;
Operands.resize(I->getNumOperands());
for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
- Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
+ Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal, TD);
if (Operands[i] == 0) return 0;
}
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
- return ConstantFoldCompareInstOperands(CI->getPredicate(),
- &Operands[0], Operands.size(),
- Context);
- else
- return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- &Operands[0], Operands.size(),
- Context);
+ return ConstantFoldCompareInstOperands(CI->getPredicate(), Operands[0],
+ Operands[1], TD);
+ return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
+ &Operands[0], Operands.size(), TD);
}
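// E.g. (a hedged example): with PHIVal == i32 7 for the loop PHI, the
// expression "mul (add %phi, 1), 2" folds bottom-up through the calls
// above to the constant 16.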
/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
- const APInt& BEs,
+ const APInt &BEs,
const Loop *L) {
std::map<PHINode*, Constant*>::iterator I =
ConstantEvolutionLoopExitValue.find(PN);
if (I != ConstantEvolutionLoopExitValue.end())
return I->second;
- if (BEs.ugt(APInt(BEs.getBitWidth(),MaxBruteForceIterations)))
+ if (BEs.ugt(MaxBruteForceIterations))
return ConstantEvolutionLoopExitValue[PN] = 0; // Not going to evaluate it.
Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];
return RetVal = PHIVal; // Got exit value!
// Compute the value of the PHI node for the next iteration.
- Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
+ Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
if (NextPHI == PHIVal)
return RetVal = NextPHI; // Stopped evolving!
if (NextPHI == 0)
for (Constant *PHIVal = StartCST;
IterationNum != MaxIterations; ++IterationNum) {
ConstantInt *CondVal =
- dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));
+ dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal, TD));
// Couldn't symbolically evaluate.
if (!CondVal) return getCouldNotCompute();
}
// Compute the value of the PHI node for the next iteration.
- Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
+ Constant *NextPHI = EvaluateExpression(BEValue, PHIVal, TD);
if (NextPHI == 0 || NextPHI == PHIVal)
return getCouldNotCompute();// Couldn't evaluate or not making progress...
PHIVal = NextPHI;
if (!isSCEVable(Op->getType()))
return V;
- const SCEV* OpV = getSCEVAtScope(Op, L);
+ const SCEV *OpV = getSCEVAtScope(Op, L);
if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
Constant *C = SC->getValue();
if (C->getType() != Op->getType())
}
}
- Constant *C;
+ Constant *C = 0;
if (const CmpInst *CI = dyn_cast<CmpInst>(I))
C = ConstantFoldCompareInstOperands(CI->getPredicate(),
- &Operands[0], Operands.size(),
- getContext());
+ Operands[0], Operands[1], TD);
else
C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
- &Operands[0], Operands.size(),
- getContext());
- return getSCEV(C);
+ &Operands[0], Operands.size(), TD);
+ if (C)
+ return getSCEV(C);
}
}
// If this is a loop recurrence for a loop that does not contain L, then we
// are dealing with the final value computed by the loop.
if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
- if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
+ if (!L || !AddRec->getLoop()->contains(L)) {
// To evaluate this recurrence, we need to know how many times the AddRec
// loop iterates. Compute this now.
const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
return getTruncateExpr(Op, Cast->getType());
}
- if (isa<SCEVTargetDataConstant>(V))
- return V;
-
llvm_unreachable("Unknown SCEV type!");
return 0;
}
/// HowFarToZero - Return the number of times a backedge comparing the specified
/// value to zero will execute. If not computable, return CouldNotCompute.
-const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
+ScalarEvolution::BackedgeTakenInfo
+ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
// If the value is a constant
if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
// If the value is already zero, the branch will execute zero times.
-StartC->getValue()->getValue(),
*this);
}
- } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
+ } else if (AddRec->isQuadratic() && AddRec->getType()->isIntegerTy()) {
// If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
// the quadratic equation to solve it.
std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
if (R1) {
#if 0
- errs() << "HFTZ: " << *V << " - sol#1: " << *R1
+ dbgs() << "HFTZ: " << *V << " - sol#1: " << *R1
<< " sol#2: " << *R2 << "\n";
#endif
// Pick the smallest positive root value.
/// HowFarToNonZero - Return the number of times a backedge checking the
/// specified value for nonzero will execute. If not computable, return
/// CouldNotCompute.
-const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
+ScalarEvolution::BackedgeTakenInfo
+ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
// Loops that look like: while (X == 0) are very strange indeed. We don't
// handle them yet except for the trivial case. This could be expanded in the
// future as needed.
/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it. Otherwise return null.
+/// This is less strict than the loop "preheader" concept, which requires
+/// the predecessor to have exactly one successor.
///
BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
BasicBlock *Header = L->getHeader();
/// successor from which BB is reachable, or null if no such block is
/// found.
///
-BasicBlock *
+std::pair<BasicBlock *, BasicBlock *>
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
// If the block has a unique predecessor, then there is no path from the
// predecessor to the block that does not go through the direct edge
// from the predecessor to the block.
if (BasicBlock *Pred = BB->getSinglePredecessor())
- return Pred;
+ return std::make_pair(Pred, BB);
// A loop's header is defined to be a block that dominates the loop.
// If the header has a unique predecessor outside the loop, it must be
// a block that has exactly one successor that can reach the loop.
if (Loop *L = LI->getLoopFor(BB))
- return getLoopPredecessor(L);
+ return std::make_pair(getLoopPredecessor(L), L->getHeader());
- return 0;
+ return std::pair<BasicBlock *, BasicBlock *>();
}
/// HasSameValue - SCEV structural equivalence is usually sufficient for
bool ScalarEvolution::isKnownPredicate(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS) {
+ // If LHS or RHS is an addrec, check to see if the condition is true in
+ // every iteration of the loop.
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
+ if (isLoopEntryGuardedByCond(
+ AR->getLoop(), Pred, AR->getStart(), RHS) &&
+ isLoopBackedgeGuardedByCond(
+ AR->getLoop(), Pred,
+ getAddExpr(AR, AR->getStepRecurrence(*this)), RHS))
+ return true;
+ if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(RHS))
+ if (isLoopEntryGuardedByCond(
+ AR->getLoop(), Pred, LHS, AR->getStart()) &&
+ isLoopBackedgeGuardedByCond(
+ AR->getLoop(), Pred,
+ LHS, getAddExpr(AR, AR->getStepRecurrence(*this))))
+ return true;
+ // Otherwise see what can be done with known constant ranges.
+ return isKnownPredicateWithRanges(Pred, LHS, RHS);
+}
+
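+// A hedged illustration: to show {1,+,1}<%L> s< %n, it suffices that
+// 1 s< %n holds on entry to %L and that the post-increment value
+// {2,+,1}<%L> s< %n holds whenever the backedge is taken; together these
+// cover the first and every subsequent iteration.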
+bool
+ScalarEvolution::isKnownPredicateWithRanges(ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS) {
if (HasSameValue(LHS, RHS))
return ICmpInst::isTrueWhenEqual(Pred);
+ // This code is split out from isKnownPredicate because it is called from
+ // within isLoopEntryGuardedByCond.
switch (Pred) {
default:
llvm_unreachable("Unexpected ICmpInst::Predicate value!");
LoopContinuePredicate->getSuccessor(0) != L->getHeader());
}
-/// isLoopGuardedByCond - Test whether entry to the loop is protected
+/// isLoopEntryGuardedByCond - Test whether entry to the loop is protected
/// by a conditional between LHS and RHS. This is used to help avoid max
/// expressions in loop trip counts, and to eliminate casts.
bool
-ScalarEvolution::isLoopGuardedByCond(const Loop *L,
- ICmpInst::Predicate Pred,
- const SCEV *LHS, const SCEV *RHS) {
+ScalarEvolution::isLoopEntryGuardedByCond(const Loop *L,
+ ICmpInst::Predicate Pred,
+ const SCEV *LHS, const SCEV *RHS) {
// Interpret a null as meaning no loop, where there is obviously no guard
// (interprocedural conditions notwithstanding).
if (!L) return false;
- BasicBlock *Predecessor = getLoopPredecessor(L);
- BasicBlock *PredecessorDest = L->getHeader();
-
// Starting at the loop predecessor, climb up the predecessor chain, as long
// as there are predecessors that can be found that have unique successors
// leading to the original header.
- for (; Predecessor;
- PredecessorDest = Predecessor,
- Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {
+ for (std::pair<BasicBlock *, BasicBlock *>
+ Pair(getLoopPredecessor(L), L->getHeader());
+ Pair.first;
+ Pair = getPredecessorWithUniqueSuccessorForBB(Pair.first)) {
BranchInst *LoopEntryPredicate =
- dyn_cast<BranchInst>(Predecessor->getTerminator());
+ dyn_cast<BranchInst>(Pair.first->getTerminator());
if (!LoopEntryPredicate ||
LoopEntryPredicate->isUnconditional())
continue;
if (isImpliedCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
- LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
+ LoopEntryPredicate->getSuccessor(0) != Pair.second))
return true;
}
ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
bool Inverse) {
- // Recursivly handle And and Or conditions.
+ // Recursively handle And and Or conditions.
if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
if (BO->getOpcode() == Instruction::And) {
if (!Inverse)
}
/// isImpliedCondOperands - Test whether the condition described by Pred,
-/// LHS, and RHS is true whenever the condition desribed by Pred, FoundLHS,
+/// LHS, and RHS is true whenever the condition described by Pred, FoundLHS,
/// and FoundRHS is true.
bool ScalarEvolution::isImpliedCondOperands(ICmpInst::Predicate Pred,
const SCEV *LHS, const SCEV *RHS,
}
/// isImpliedCondOperandsHelper - Test whether the condition described by
-/// Pred, LHS, and RHS is true whenever the condition desribed by Pred,
+/// Pred, LHS, and RHS is true whenever the condition described by Pred,
/// FoundLHS, and FoundRHS is true.
bool
ScalarEvolution::isImpliedCondOperandsHelper(ICmpInst::Predicate Pred,
break;
case ICmpInst::ICMP_SLT:
case ICmpInst::ICMP_SLE:
- if (isKnownPredicate(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_SGE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_SGT:
case ICmpInst::ICMP_SGE:
- if (isKnownPredicate(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_SLE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_SGE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_SLE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_ULT:
case ICmpInst::ICMP_ULE:
- if (isKnownPredicate(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_UGE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, RHS, FoundRHS))
return true;
break;
case ICmpInst::ICMP_UGT:
case ICmpInst::ICMP_UGE:
- if (isKnownPredicate(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
- isKnownPredicate(ICmpInst::ICMP_ULE, RHS, FoundRHS))
+ if (isKnownPredicateWithRanges(ICmpInst::ICMP_UGE, LHS, FoundLHS) &&
+ isKnownPredicateWithRanges(ICmpInst::ICMP_ULE, RHS, FoundRHS))
return true;
break;
}
const SCEV *End,
const SCEV *Step,
bool NoWrap) {
+ assert(!isKnownNegative(Step) &&
+ "This code doesn't handle negative strides yet!");
+
const Type *Ty = Start->getType();
const SCEV *NegOne = getIntegerSCEV(-1, Ty);
const SCEV *Diff = getMinusSCEV(End, Start);
AddRec->hasNoUnsignedWrap();
if (AddRec->isAffine()) {
- // FORNOW: We only support unit strides.
unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
const SCEV *Step = AddRec->getStepRecurrence(*this);
- // TODO: handle non-constant strides.
- const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
- if (!CStep || CStep->isZero())
+ if (Step->isZero())
return getCouldNotCompute();
- if (CStep->isOne()) {
+ if (Step->isOne()) {
// With unit stride, the iteration never steps past the limit value.
- } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
- if (NoWrap) {
- // We know the iteration won't step past the maximum value for its type.
- ;
- } else if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
- // Test whether a positive iteration iteration can step past the limit
- // value and past the maximum value for its type in a single step.
- if (isSigned) {
- APInt Max = APInt::getSignedMaxValue(BitWidth);
- if ((Max - CStep->getValue()->getValue())
- .slt(CLimit->getValue()->getValue()))
- return getCouldNotCompute();
- } else {
- APInt Max = APInt::getMaxValue(BitWidth);
- if ((Max - CStep->getValue()->getValue())
- .ult(CLimit->getValue()->getValue()))
- return getCouldNotCompute();
- }
- } else
- // TODO: handle non-constant limit values below.
- return getCouldNotCompute();
+ } else if (isKnownPositive(Step)) {
+ // Test whether a positive iteration can step past the limit
+ // value and past the maximum value for its type in a single step.
+ // Note that it's not sufficient to check NoWrap here, because even
+ // though the value after a wrap is undefined, it's not undefined
+ // behavior: if wrap does occur, the loop could either terminate or
+ // loop infinitely, but in either case it is guaranteed to iterate at
+ // least until the iteration where the wrapping occurs.
+ const SCEV *One = getIntegerSCEV(1, Step->getType());
+ if (isSigned) {
+ APInt Max = APInt::getSignedMaxValue(BitWidth);
+ if ((Max - getSignedRange(getMinusSCEV(Step, One)).getSignedMax())
+ .slt(getSignedRange(RHS).getSignedMax()))
+ return getCouldNotCompute();
+ } else {
+ APInt Max = APInt::getMaxValue(BitWidth);
+ if ((Max - getUnsignedRange(getMinusSCEV(Step, One)).getUnsignedMax())
+ .ult(getUnsignedRange(RHS).getUnsignedMax()))
+ return getCouldNotCompute();
+ }
} else
- // TODO: handle negative strides below.
+ // TODO: Handle negative strides here and below.
return getCouldNotCompute();
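// Worked example (8-bit, unsigned, hedged): if Step's maximum is 5 and
// RHS's maximum is 252, then 255 - (5 - 1) == 251 u< 252, so a single
// iteration could step past both the limit and UINT8_MAX, and no count
// is computed.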
// We know the LHS is of the form {n,+,s} and the RHS is some loop-invariant
// only know that it will execute (max(m,n)-n)/s times. In both cases,
// the division must round up.
const SCEV *End = RHS;
- if (!isLoopGuardedByCond(L,
- isSigned ? ICmpInst::ICMP_SLT :
- ICmpInst::ICMP_ULT,
- getMinusSCEV(Start, Step), RHS))
+ if (!isLoopEntryGuardedByCond(L,
+ isSigned ? ICmpInst::ICMP_SLT :
+ ICmpInst::ICMP_ULT,
+ getMinusSCEV(Start, Step), RHS))
End = isSigned ? getSMaxExpr(RHS, Start)
: getUMaxExpr(RHS, Start);
getSignedRange(End).getSignedMax() :
getUnsignedRange(End).getUnsignedMax());
+ // If MaxEnd is within a step of the maximum integer value in its type,
+ // adjust it down to the minimum value which would produce the same effect.
+ // This allows the subsequent ceiling division of (N+(step-1))/step to
+ // compute the correct value.
+ const SCEV *StepMinusOne = getMinusSCEV(Step,
+ getIntegerSCEV(1, Step->getType()));
+ MaxEnd = isSigned ?
+ getSMinExpr(MaxEnd,
+ getMinusSCEV(getConstant(APInt::getSignedMaxValue(BitWidth)),
+ StepMinusOne)) :
+ getUMinExpr(MaxEnd,
+ getMinusSCEV(getConstant(APInt::getMaxValue(BitWidth)),
+ StepMinusOne));
+
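+ // For instance (hedged): with an 8-bit unsigned count and Step == 4,
+ // MaxEnd is clamped to 255 - 3 == 252, so the later rounding addition
+ // of (Step - 1) in the ceiling division cannot overflow.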
// Finally, we subtract these two values and divide, rounding up, to get
// the number of times the backedge is executed.
const SCEV *BECount = getBECount(Start, End, Step, NoWrap);
// ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//
-FunctionPass *createScalarEvolutionPass() { return new ScalarEvolution(); }
-
ScalarEvolution::ScalarEvolution()
: FunctionPass(&ID) {
}
this->F = &F;
LI = &getAnalysis<LoopInfo>();
TD = getAnalysisIfAvailable<TargetData>();
+ DT = &getAnalysis<DominatorTree>();
return false;
}
void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
AU.setPreservesAll();
AU.addRequiredTransitive<LoopInfo>();
+ AU.addRequiredTransitive<DominatorTree>();
}
bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
PrintLoopInfo(OS, SE, *I);
- OS << "Loop " << L->getHeader()->getName() << ": ";
+ OS << "Loop ";
+ WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
+ OS << ": ";
- SmallVector<BasicBlock*, 8> ExitBlocks;
+ SmallVector<BasicBlock *, 8> ExitBlocks;
L->getExitBlocks(ExitBlocks);
if (ExitBlocks.size() != 1)
OS << "<multiple exits> ";
OS << "Unpredictable backedge-taken count. ";
}
- OS << "\n";
- OS << "Loop " << L->getHeader()->getName() << ": ";
+ OS << "\n"
+ "Loop ";
+ WriteAsOperand(OS, L->getHeader(), /*PrintType=*/false);
+ OS << ": ";
if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
OS << "\n";
}
-void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
- // ScalarEvolution's implementaiton of the print method is to print
+void ScalarEvolution::print(raw_ostream &OS, const Module *) const {
+ // ScalarEvolution's implementation of the print method is to print
// out SCEV values of all instructions that are interesting. Doing
// this potentially causes it to create new SCEV objects though,
// which technically conflicts with the const qualifier. This isn't
// observable from outside the class though, so casting away the
// const isn't dangerous.
- ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);
+ ScalarEvolution &SE = *const_cast<ScalarEvolution *>(this);
- OS << "Classifying expressions for: " << F->getName() << "\n";
+ OS << "Classifying expressions for: ";
+ WriteAsOperand(OS, F, /*PrintType=*/false);
+ OS << "\n";
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
if (isSCEVable(I->getType())) {
OS << *I << '\n';
OS << "\n";
}
- OS << "Determining loop execution counts for: " << F->getName() << "\n";
+ OS << "Determining loop execution counts for: ";
+ WriteAsOperand(OS, F, /*PrintType=*/false);
+ OS << "\n";
for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
PrintLoopInfo(OS, &SE, *I);
}