//===- ScalarEvolution.cpp - Scalar Evolution Analysis ----------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the scalar evolution analysis
// engine, which is used primarily to analyze expressions involving induction
// variables in loops.
//
// There are several aspects to this library.  First is the representation of
// scalar expressions, which are represented as subclasses of the SCEV class.
// These classes are used to represent certain types of subexpressions that we
// can handle.  These objects are uniqued and managed through const SCEV *
// pointers.  We only create one SCEV of a particular shape, so
// pointer-comparisons for equality are legal.
//
// One important aspect of the SCEV objects is that they are never cyclic, even
// if there is a cycle in the dataflow for an expression (i.e., a PHI node).  If
// the PHI node is one of the idioms that we can represent (e.g., a polynomial
// recurrence) then we represent it directly as a recurrence node, otherwise we
// represent it as a SCEVUnknown node.
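//
// For example, in a loop like "for (i = 0; i < n; ++i)", the expression for
// i is the recurrence {0,+,1}<loop>: it starts at 0 and advances by 1 on
// each iteration of the loop whose header block is named "loop".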
//
// In addition to being able to represent expressions of various types, we also
// have folders that are used to build the *canonical* representation for a
// particular expression.  These folders are capable of using a variety of
// rewrite rules to simplify the expressions.
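//
// For example, the add-expression folder rewrites x + 2 + 3 + x into the
// canonical form (5 + (2 * x)): the constants are folded together and the
// repeated operand becomes a multiply.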
//
// Once the folders are defined, we can implement the more interesting
// higher-level code, such as the code that recognizes PHI nodes of various
// types, computes the execution count of a loop, etc.
//
// TODO: We should use these routines and value representations to implement
// dependence analysis!
//
//===----------------------------------------------------------------------===//
//
// There are several good references for the techniques used in this analysis.
//
//  Chains of recurrences -- a method to expedite the evaluation
//  of closed-form functions
//  Olaf Bachmann, Paul S. Wang, Eugene V. Zima
//
//  On computational properties of chains of recurrences
//  Eugene V. Zima
//
//  Symbolic Evaluation of Chains of Recurrences for Loop Optimization
//  Robert A. van Engelen
//
//  Efficient Symbolic Analysis for Optimizing Compilers
//  Robert A. van Engelen
//
//  Using the chains of recurrences algebra for data dependence testing and
//  induction variable substitution
//  MS Thesis, Johnie Birch
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "scalar-evolution"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ConstantRange.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/InstIterator.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include <algorithm>
using namespace llvm;

STATISTIC(NumArrayLenItCounts,
          "Number of trip counts computed with array length");
STATISTIC(NumTripCountsComputed,
          "Number of loops with predictable loop counts");
STATISTIC(NumTripCountsNotComputed,
          "Number of loops without predictable loop counts");
STATISTIC(NumBruteForceTripCountsComputed,
          "Number of loops with trip counts computed by force");

static cl::opt<unsigned>
MaxBruteForceIterations("scalar-evolution-max-iterations", cl::ReallyHidden,
                        cl::desc("Maximum number of iterations SCEV will "
                                 "symbolically execute a constant "
                                 "derived loop"),
                        cl::init(100));

static RegisterPass<ScalarEvolution>
R("scalar-evolution", "Scalar Evolution Analysis", false, true);
char ScalarEvolution::ID = 0;

//===----------------------------------------------------------------------===//
//                           SCEV class definitions
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// Implementation of the SCEV class.
//

SCEV::~SCEV() {}

void SCEV::dump() const {
  print(errs());
  errs() << '\n';
}

void SCEV::print(std::ostream &o) const {
  raw_os_ostream OS(o);
  print(OS);
}

bool SCEV::isZero() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isZero();
  return false;
}

bool SCEV::isOne() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isOne();
  return false;
}

bool SCEV::isAllOnesValue() const {
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(this))
    return SC->getValue()->isAllOnesValue();
  return false;
}

SCEVCouldNotCompute::SCEVCouldNotCompute() :
  SCEV(FoldingSetNodeID(), scCouldNotCompute) {}

bool SCEVCouldNotCompute::isLoopInvariant(const Loop *L) const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const Type *SCEVCouldNotCompute::getType() const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return 0;
}

bool SCEVCouldNotCompute::hasComputableLoopEvolution(const Loop *L) const {
  LLVM_UNREACHABLE("Attempt to use a SCEVCouldNotCompute object!");
  return false;
}

const SCEV *
SCEVCouldNotCompute::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                       const SCEV *Conc,
                                                       ScalarEvolution &SE) const {
  return this;
}

void SCEVCouldNotCompute::print(raw_ostream &OS) const {
  OS << "***COULDNOTCOMPUTE***";
}

bool SCEVCouldNotCompute::classof(const SCEV *S) {
  return S->getSCEVType() == scCouldNotCompute;
}

const SCEV *ScalarEvolution::getConstant(ConstantInt *V) {
  FoldingSetNodeID ID;
  ID.AddInteger(scConstant);
  ID.AddPointer(V);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVConstant>();
  new (S) SCEVConstant(ID, V);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getConstant(const APInt& Val) {
  return getConstant(ConstantInt::get(Val));
}

const SCEV *
ScalarEvolution::getConstant(const Type *Ty, uint64_t V, bool isSigned) {
  return getConstant(ConstantInt::get(cast<IntegerType>(Ty), V, isSigned));
}

const Type *SCEVConstant::getType() const { return V->getType(); }

void SCEVConstant::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

SCEVCastExpr::SCEVCastExpr(const FoldingSetNodeID &ID,
                           unsigned SCEVTy, const SCEV *op, const Type *ty)
  : SCEV(ID, SCEVTy), Op(op), Ty(ty) {}

bool SCEVCastExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return Op->dominates(BB, DT);
}

SCEVTruncateExpr::SCEVTruncateExpr(const FoldingSetNodeID &ID,
                                   const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scTruncate, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot truncate non-integer value!");
}

void SCEVTruncateExpr::print(raw_ostream &OS) const {
  OS << "(trunc " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVZeroExtendExpr::SCEVZeroExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scZeroExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot zero extend non-integer value!");
}

void SCEVZeroExtendExpr::print(raw_ostream &OS) const {
  OS << "(zext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

SCEVSignExtendExpr::SCEVSignExtendExpr(const FoldingSetNodeID &ID,
                                       const SCEV *op, const Type *ty)
  : SCEVCastExpr(ID, scSignExtend, op, ty) {
  assert((Op->getType()->isInteger() || isa<PointerType>(Op->getType())) &&
         (Ty->isInteger() || isa<PointerType>(Ty)) &&
         "Cannot sign extend non-integer value!");
}

void SCEVSignExtendExpr::print(raw_ostream &OS) const {
  OS << "(sext " << *Op->getType() << " " << *Op << " to " << *Ty << ")";
}

void SCEVCommutativeExpr::print(raw_ostream &OS) const {
  assert(Operands.size() > 1 && "This plus expr shouldn't exist!");
  const char *OpStr = getOperationStr();
  OS << "(" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << OpStr << *Operands[i];
  OS << ")";
}

const SCEV *
SCEVCommutativeExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                       const SCEV *Conc,
                                                       ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      // An operand changed; rebuild the whole operand list, rewriting each
      // remaining operand as well.
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      if (isa<SCEVAddExpr>(this))
        return SE.getAddExpr(NewOps);
      else if (isa<SCEVMulExpr>(this))
        return SE.getMulExpr(NewOps);
      else if (isa<SCEVSMaxExpr>(this))
        return SE.getSMaxExpr(NewOps);
      else if (isa<SCEVUMaxExpr>(this))
        return SE.getUMaxExpr(NewOps);
      else
        LLVM_UNREACHABLE("Unknown commutative expr!");
    }
  }
  return this;
}

bool SCEVNAryExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    if (!getOperand(i)->dominates(BB, DT))
      return false;
  }
  return true;
}

bool SCEVUDivExpr::dominates(BasicBlock *BB, DominatorTree *DT) const {
  return LHS->dominates(BB, DT) && RHS->dominates(BB, DT);
}

void SCEVUDivExpr::print(raw_ostream &OS) const {
  OS << "(" << *LHS << " /u " << *RHS << ")";
}

const Type *SCEVUDivExpr::getType() const {
  // In most cases the types of LHS and RHS will be the same, but in some
  // crazy cases one or the other may be a pointer. ScalarEvolution doesn't
  // depend on the type for correctness, but handling types carefully can
  // avoid extra casts in the SCEVExpander. The LHS is more likely to be
  // a pointer type than the RHS, so use the RHS' type here.
  return RHS->getType();
}

const SCEV *
SCEVAddRecExpr::replaceSymbolicValuesWithConcrete(const SCEV *Sym,
                                                  const SCEV *Conc,
                                                  ScalarEvolution &SE) const {
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i) {
    const SCEV *H =
      getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
    if (H != getOperand(i)) {
      SmallVector<const SCEV *, 8> NewOps;
      NewOps.reserve(getNumOperands());
      for (unsigned j = 0; j != i; ++j)
        NewOps.push_back(getOperand(j));
      NewOps.push_back(H);
      for (++i; i != e; ++i)
        NewOps.push_back(getOperand(i)->
                         replaceSymbolicValuesWithConcrete(Sym, Conc, SE));

      return SE.getAddRecExpr(NewOps, L);
    }
  }
  return this;
}

bool SCEVAddRecExpr::isLoopInvariant(const Loop *QueryLoop) const {
  // Add recurrences are never invariant in the function-body (null loop).
  if (!QueryLoop)
    return false;

  // This recurrence is variant w.r.t. QueryLoop if QueryLoop contains L.
  if (QueryLoop->contains(L->getHeader()))
    return false;

  // This recurrence is variant w.r.t. QueryLoop if any of its operands
  // are variant.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!getOperand(i)->isLoopInvariant(QueryLoop))
      return false;

  // Otherwise it's loop-invariant.
  return true;
}

void SCEVAddRecExpr::print(raw_ostream &OS) const {
  OS << "{" << *Operands[0];
  for (unsigned i = 1, e = Operands.size(); i != e; ++i)
    OS << ",+," << *Operands[i];
  OS << "}<" << L->getHeader()->getName() << ">";
}

bool SCEVUnknown::isLoopInvariant(const Loop *L) const {
  // All non-instruction values are loop invariant.  All instructions are loop
  // invariant if they are not contained in the specified loop.
  // Instructions are never considered invariant in the function body
  // (null loop) because they are defined within the "loop".
  if (Instruction *I = dyn_cast<Instruction>(V))
    return L && !L->contains(I->getParent());
  return true;
}

bool SCEVUnknown::dominates(BasicBlock *BB, DominatorTree *DT) const {
  if (Instruction *I = dyn_cast<Instruction>(getValue()))
    return DT->dominates(I->getParent(), BB);
  return true;
}

const Type *SCEVUnknown::getType() const {
  return V->getType();
}

void SCEVUnknown::print(raw_ostream &OS) const {
  WriteAsOperand(OS, V, false);
}

//===----------------------------------------------------------------------===//
//                               SCEV Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// SCEVComplexityCompare - Return true if the complexity of the LHS is less
  /// than the complexity of the RHS.  This comparator is used to canonicalize
  /// expressions.
  class VISIBILITY_HIDDEN SCEVComplexityCompare {
    LoopInfo *LI;
  public:
    explicit SCEVComplexityCompare(LoopInfo *li) : LI(li) {}

    bool operator()(const SCEV *LHS, const SCEV *RHS) const {
      // Primarily, sort the SCEVs by their getSCEVType().
      if (LHS->getSCEVType() != RHS->getSCEVType())
        return LHS->getSCEVType() < RHS->getSCEVType();

      // Aside from the getSCEVType() ordering, the particular ordering
      // isn't very important except that it's beneficial to be consistent,
      // so that (a + b) and (b + a) don't end up as different expressions.

      // Sort SCEVUnknown values with some loose heuristics. TODO: This is
      // not as complete as it could be.
      if (const SCEVUnknown *LU = dyn_cast<SCEVUnknown>(LHS)) {
        const SCEVUnknown *RU = cast<SCEVUnknown>(RHS);

        // Order pointer values after integer values. This helps SCEVExpander
        // form GEPs.
        if (isa<PointerType>(LU->getType()) && !isa<PointerType>(RU->getType()))
          return false;
        if (isa<PointerType>(RU->getType()) && !isa<PointerType>(LU->getType()))
          return true;

        // Compare getValueID values.
        if (LU->getValue()->getValueID() != RU->getValue()->getValueID())
          return LU->getValue()->getValueID() < RU->getValue()->getValueID();

        // Sort arguments by their position.
        if (const Argument *LA = dyn_cast<Argument>(LU->getValue())) {
          const Argument *RA = cast<Argument>(RU->getValue());
          return LA->getArgNo() < RA->getArgNo();
        }

        // For instructions, compare their loop depth, and their opcode.
        // This is pretty loose.
        if (Instruction *LV = dyn_cast<Instruction>(LU->getValue())) {
          Instruction *RV = cast<Instruction>(RU->getValue());

          // Compare loop depths.
          if (LI->getLoopDepth(LV->getParent()) !=
              LI->getLoopDepth(RV->getParent()))
            return LI->getLoopDepth(LV->getParent()) <
                   LI->getLoopDepth(RV->getParent());

          // Compare opcodes.
          if (LV->getOpcode() != RV->getOpcode())
            return LV->getOpcode() < RV->getOpcode();

          // Compare the number of operands.
          if (LV->getNumOperands() != RV->getNumOperands())
            return LV->getNumOperands() < RV->getNumOperands();
        }
        return false;
      }

      // Compare constant values.
      if (const SCEVConstant *LC = dyn_cast<SCEVConstant>(LHS)) {
        const SCEVConstant *RC = cast<SCEVConstant>(RHS);
        if (LC->getValue()->getBitWidth() != RC->getValue()->getBitWidth())
          return LC->getValue()->getBitWidth() < RC->getValue()->getBitWidth();
        return LC->getValue()->getValue().ult(RC->getValue()->getValue());
      }

      // Compare addrec loop depths.
      if (const SCEVAddRecExpr *LA = dyn_cast<SCEVAddRecExpr>(LHS)) {
        const SCEVAddRecExpr *RA = cast<SCEVAddRecExpr>(RHS);
        if (LA->getLoop()->getLoopDepth() != RA->getLoop()->getLoopDepth())
          return LA->getLoop()->getLoopDepth() < RA->getLoop()->getLoopDepth();
      }

      // Lexicographically compare n-ary expressions.
      if (const SCEVNAryExpr *LC = dyn_cast<SCEVNAryExpr>(LHS)) {
        const SCEVNAryExpr *RC = cast<SCEVNAryExpr>(RHS);
        for (unsigned i = 0, e = LC->getNumOperands(); i != e; ++i) {
          if (i >= RC->getNumOperands())
            return false;
          if (operator()(LC->getOperand(i), RC->getOperand(i)))
            return true;
          if (operator()(RC->getOperand(i), LC->getOperand(i)))
            return false;
        }
        return LC->getNumOperands() < RC->getNumOperands();
      }

      // Lexicographically compare udiv expressions.
      if (const SCEVUDivExpr *LC = dyn_cast<SCEVUDivExpr>(LHS)) {
        const SCEVUDivExpr *RC = cast<SCEVUDivExpr>(RHS);
        if (operator()(LC->getLHS(), RC->getLHS()))
          return true;
        if (operator()(RC->getLHS(), LC->getLHS()))
          return false;
        if (operator()(LC->getRHS(), RC->getRHS()))
          return true;
        if (operator()(RC->getRHS(), LC->getRHS()))
          return false;
        return false;
      }

      // Compare cast expressions by operand.
      if (const SCEVCastExpr *LC = dyn_cast<SCEVCastExpr>(LHS)) {
        const SCEVCastExpr *RC = cast<SCEVCastExpr>(RHS);
        return operator()(LC->getOperand(), RC->getOperand());
      }

      LLVM_UNREACHABLE("Unknown SCEV kind!");
      return false;
    }
  };
}

/// GroupByComplexity - Given a list of SCEV objects, order them by their
/// complexity, and group objects of the same complexity together by value.
/// When this routine is finished, we know that any duplicates in the vector are
/// consecutive and that complexity is monotonically increasing.
///
/// Note that we take special precautions to ensure that we get deterministic
/// results from this routine.  In other words, we don't want the results of
/// this routine to depend on where the addresses of various SCEV objects
/// happened to end up in memory.
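///
/// For example (illustrative), given the operand list (x, 2, y, x), where 2
/// is a SCEVConstant and x and y are SCEVUnknowns, the result is
/// (2, x, x, y) (the relative order of x and y is decided by the heuristics
/// above): the constant sorts first, and the duplicate x's become adjacent
/// so that callers can detect them with a simple linear scan.
///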
static void GroupByComplexity(SmallVectorImpl<const SCEV *> &Ops,
                              LoopInfo *LI) {
  if (Ops.size() < 2) return;  // Noop
  if (Ops.size() == 2) {
    // This is the common case, which also happens to be trivially simple.
    // Special case it.
    if (SCEVComplexityCompare(LI)(Ops[1], Ops[0]))
      std::swap(Ops[0], Ops[1]);
    return;
  }

  // Do the rough sort by complexity.
  std::stable_sort(Ops.begin(), Ops.end(), SCEVComplexityCompare(LI));

  // Now that we are sorted by complexity, group elements of the same
  // complexity.  Note that this is, at worst, N^2, but the vector is likely to
  // be extremely short in practice.  Note that we take this approach because we
  // do not want to depend on the addresses of the objects we are grouping.
  for (unsigned i = 0, e = Ops.size(); i != e-2; ++i) {
    const SCEV *S = Ops[i];
    unsigned Complexity = S->getSCEVType();

    // If there are any objects of the same complexity and same value as this
    // one, group them.
    for (unsigned j = i+1; j != e && Ops[j]->getSCEVType() == Complexity; ++j) {
      if (Ops[j] == S) { // Found a duplicate.
        // Move it to immediately after i'th element.
        std::swap(Ops[i+1], Ops[j]);
        ++i;   // no need to rescan it.
        if (i == e-2) return;  // Done!
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//                      Simple SCEV method implementations
//===----------------------------------------------------------------------===//

/// BinomialCoefficient - Compute BC(It, K).  The result has width W.
/// Assume, K > 0.
static const SCEV *BinomialCoefficient(const SCEV *It, unsigned K,
                                       ScalarEvolution &SE,
                                       const Type* ResultTy) {
  // Handle the simplest case efficiently.
  if (K == 1)
    return SE.getTruncateOrZeroExtend(It, ResultTy);

  // We are using the following formula for BC(It, K):
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / K!
  //
  // Suppose, W is the bitwidth of the return value.  We must be prepared for
  // overflow.  Hence, we must assure that the result of our computation is
  // equal to the accurate one modulo 2^W.  Unfortunately, division isn't
  // safe in modular arithmetic.
  //
  // However, this code doesn't use exactly that formula; the formula it uses
  // is something like the following, where T is the number of factors of 2 in
  // K! (i.e. trailing zeros in the binary representation of K!), and ^ is
  // exponentiation:
  //
  //   BC(It, K) = (It * (It - 1) * ... * (It - K + 1)) / 2^T / (K! / 2^T)
  //
  // This formula is trivially equivalent to the previous formula.  However,
  // this formula can be implemented much more efficiently.  The trick is that
  // K! / 2^T is odd, and exact division by an odd number *is* safe in modular
  // arithmetic.  To do exact division in modular arithmetic, all we have
  // to do is multiply by the inverse.  Therefore, this step can be done at
  // compile time.
  //
  // The next issue is how to safely do the division by 2^T.  The way this
  // is done is by doing the multiplication step at a width of at least W + T
  // bits.  This way, the bottom W+T bits of the product are accurate. Then,
  // when we perform the division by 2^T (which is equivalent to a right shift
  // by T), the bottom W bits are accurate.  Extra bits are okay; they'll get
  // truncated out after the division by 2^T.
  //
  // In comparison to just directly using the first formula, this technique
  // is much more efficient; using the first formula requires W * K bits,
  // but this formula needs less than W + K bits.  Also, the first formula
  // requires a division step, whereas this formula only requires multiplies
  // and shifts.
  //
  // It doesn't matter whether the subtraction step is done in the calculation
  // width or the input iteration count's width; if the subtraction overflows,
  // the result must be zero anyway.  We prefer here to do it in the width of
  // the induction variable because it helps a lot for certain cases; CodeGen
  // isn't smart enough to ignore the overflow, which leads to much less
  // efficient code if the width of the subtraction is wider than the native
  // register width.
  //
  // (It's possible to not widen at all by pulling out factors of 2 before
  // the multiplication; for example, K=2 can be calculated as
  // It/2*(It+(It*INT_MIN/INT_MIN)+-1). However, it requires
  // extra arithmetic, so it's not an obvious win, and it gets
  // much more complicated for K > 3.)
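
  // For instance, for K = 3: K! = 6 = 2 * 3, so T = 1 and K! / 2^T = 3.
  // The product It * (It - 1) * (It - 2) is computed at W + 1 bits, shifted
  // right by T = 1, and then multiplied by the multiplicative inverse of 3
  // modulo 2^W to perform the exact division by the odd factor.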

  // Protection from insane SCEVs; this bound is conservative,
  // but it probably doesn't matter.
  if (K > 1000)
    return SE.getCouldNotCompute();

  unsigned W = SE.getTypeSizeInBits(ResultTy);

  // Calculate K! / 2^T and T; we divide out the factors of two before
  // multiplying for calculating K! / 2^T to avoid overflow.
  // Other overflow doesn't matter because we only care about the bottom
  // W bits of the result.
  APInt OddFactorial(W, 1);
  unsigned T = 1;
  for (unsigned i = 3; i <= K; ++i) {
    APInt Mult(W, i);
    unsigned TwoFactors = Mult.countTrailingZeros();
    T += TwoFactors;
    Mult = Mult.lshr(TwoFactors);
    OddFactorial *= Mult;
  }

  // We need at least W + T bits for the multiplication step
  unsigned CalculationBits = W + T;

  // Calculate 2^T, at width T+W.
  APInt DivFactor = APInt(CalculationBits, 1).shl(T);

  // Calculate the multiplicative inverse of K! / 2^T;
  // this multiplication factor will perform the exact division by
  // K! / 2^T.
  APInt Mod = APInt::getSignedMinValue(W+1);
  APInt MultiplyFactor = OddFactorial.zext(W+1);
  MultiplyFactor = MultiplyFactor.multiplicativeInverse(Mod);
  MultiplyFactor = MultiplyFactor.trunc(W);

  // Calculate the product, at width T+W
  const IntegerType *CalculationTy = IntegerType::get(CalculationBits);
  const SCEV *Dividend = SE.getTruncateOrZeroExtend(It, CalculationTy);
  for (unsigned i = 1; i != K; ++i) {
    const SCEV *S = SE.getMinusSCEV(It, SE.getIntegerSCEV(i, It->getType()));
    Dividend = SE.getMulExpr(Dividend,
                             SE.getTruncateOrZeroExtend(S, CalculationTy));
  }

  // Divide by 2^T
  const SCEV *DivResult = SE.getUDivExpr(Dividend, SE.getConstant(DivFactor));

  // Truncate the result, and divide by K! / 2^T.
  return SE.getMulExpr(SE.getConstant(MultiplyFactor),
                       SE.getTruncateOrZeroExtend(DivResult, ResultTy));
}

/// evaluateAtIteration - Return the value of this chain of recurrences at
/// the specified iteration number.  We can evaluate this recurrence by
/// multiplying each element in the chain by the binomial coefficient
/// corresponding to it.  In other words, we can evaluate {A,+,B,+,C,+,D} as:
///
///   A*BC(It, 0) + B*BC(It, 1) + C*BC(It, 2) + D*BC(It, 3)
///
/// where BC(It, k) stands for binomial coefficient.
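///
/// For example, the affine recurrence {A,+,B} evaluates at iteration It to
/// A + B*It, since BC(It, 0) = 1 and BC(It, 1) = It.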
const SCEV *SCEVAddRecExpr::evaluateAtIteration(const SCEV *It,
                                                ScalarEvolution &SE) const {
  const SCEV *Result = getStart();
  for (unsigned i = 1, e = getNumOperands(); i != e; ++i) {
    // The computation is correct in the face of overflow provided that the
    // multiplication is performed _after_ the evaluation of the binomial
    // coefficient.
    const SCEV *Coeff = BinomialCoefficient(It, i, SE, getType());
    if (isa<SCEVCouldNotCompute>(Coeff))
      return Coeff;

    Result = SE.getAddExpr(Result, SE.getMulExpr(getOperand(i), Coeff));
  }
  return Result;
}

//===----------------------------------------------------------------------===//
//                    SCEV Expression folder implementations
//===----------------------------------------------------------------------===//

const SCEV *ScalarEvolution::getTruncateExpr(const SCEV *Op,
                                             const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) > getTypeSizeInBits(Ty) &&
         "This is not a truncating conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  FoldingSetNodeID ID;
  ID.AddInteger(scTruncate);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    return getConstant(
      cast<ConstantInt>(ConstantExpr::getTrunc(SC->getValue(), Ty)));

  // trunc(trunc(x)) --> trunc(x)
  if (const SCEVTruncateExpr *ST = dyn_cast<SCEVTruncateExpr>(Op))
    return getTruncateExpr(ST->getOperand(), Ty);

  // trunc(sext(x)) --> sext(x) if widening or trunc(x) if narrowing
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getTruncateOrSignExtend(SS->getOperand(), Ty);

  // trunc(zext(x)) --> zext(x) if widening or trunc(x) if narrowing
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getTruncateOrZeroExtend(SZ->getOperand(), Ty);

  // If the input value is a chrec scev, truncate the chrec's operands.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
    SmallVector<const SCEV *, 4> Operands;
    for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
      Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
    return getAddRecExpr(Operands, AddRec->getLoop());
  }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVTruncateExpr>();
  new (S) SCEVTruncateExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getZeroExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getZExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // zext(zext(x)) --> zext(x)
  if (const SCEVZeroExtendExpr *SZ = dyn_cast<SCEVZeroExtendExpr>(Op))
    return getZeroExtendExpr(SZ->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scZeroExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can zero extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (unsigned char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV *Start = AR->getStart();
        const SCEV *Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no unsigned overflow.
          const SCEV *ZMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrZeroExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, ZMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getZeroExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getZeroExtendExpr(Step, Ty),
                                 AR->getLoop());

          // Similar to above, only this time treat the step value as signed.
          // This covers loops that count down.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          Add = getAddExpr(Start, SMul);
          OperandExtendedAdd =
            getAddExpr(getZeroExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getZeroExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getZeroExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVZeroExtendExpr>();
  new (S) SCEVZeroExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

const SCEV *ScalarEvolution::getSignExtendExpr(const SCEV *Op,
                                               const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Fold if the operand is constant.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op)) {
    const Type *IntTy = getEffectiveSCEVType(Ty);
    Constant *C = ConstantExpr::getSExt(SC->getValue(), IntTy);
    if (IntTy != Ty) C = ConstantExpr::getIntToPtr(C, Ty);
    return getConstant(cast<ConstantInt>(C));
  }

  // sext(sext(x)) --> sext(x)
  if (const SCEVSignExtendExpr *SS = dyn_cast<SCEVSignExtendExpr>(Op))
    return getSignExtendExpr(SS->getOperand(), Ty);

  // Before doing any expensive analysis, check to see if we've already
  // computed a SCEV for this Op and Ty.
  FoldingSetNodeID ID;
  ID.AddInteger(scSignExtend);
  ID.AddPointer(Op);
  ID.AddPointer(Ty);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;

  // If the input value is a chrec scev, and we can prove that the value
  // did not overflow the old, smaller, value, we can sign extend all of the
  // operands (often constants).  This allows analysis of something like
  // this:  for (signed char X = 0; X < 100; ++X) { int Y = X; }
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Op))
    if (AR->isAffine()) {
      // Check whether the backedge-taken count is SCEVCouldNotCompute.
      // Note that this serves two purposes: It filters out loops that are
      // simply not analyzable, and it covers the case where this code is
      // being called from within backedge-taken count analysis, such that
      // attempting to ask for the backedge-taken count would likely result
      // in infinite recursion. In the latter case, the analysis code will
      // cope with a conservative value, and it will take care to purge
      // that value once it has finished.
      const SCEV *MaxBECount = getMaxBackedgeTakenCount(AR->getLoop());
      if (!isa<SCEVCouldNotCompute>(MaxBECount)) {
        // Manually compute the final value for AR, checking for
        // overflow.
        const SCEV *Start = AR->getStart();
        const SCEV *Step = AR->getStepRecurrence(*this);

        // Check whether the backedge-taken count can be losslessly cast to
        // the addrec's type. The count is always unsigned.
        const SCEV *CastedMaxBECount =
          getTruncateOrZeroExtend(MaxBECount, Start->getType());
        const SCEV *RecastedMaxBECount =
          getTruncateOrZeroExtend(CastedMaxBECount, MaxBECount->getType());
        if (MaxBECount == RecastedMaxBECount) {
          const Type *WideTy =
            IntegerType::get(getTypeSizeInBits(Start->getType()) * 2);
          // Check whether Start+Step*MaxBECount has no signed overflow.
          const SCEV *SMul =
            getMulExpr(CastedMaxBECount,
                       getTruncateOrSignExtend(Step, Start->getType()));
          const SCEV *Add = getAddExpr(Start, SMul);
          const SCEV *OperandExtendedAdd =
            getAddExpr(getSignExtendExpr(Start, WideTy),
                       getMulExpr(getZeroExtendExpr(CastedMaxBECount, WideTy),
                                  getSignExtendExpr(Step, WideTy)));
          if (getSignExtendExpr(Add, WideTy) == OperandExtendedAdd)
            // Return the expression with the addrec on the outside.
            return getAddRecExpr(getSignExtendExpr(Start, Ty),
                                 getSignExtendExpr(Step, Ty),
                                 AR->getLoop());
        }
      }
    }

  // The cast wasn't folded; create an explicit cast node.
  // Recompute the insert position, as it may have been invalidated.
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVSignExtendExpr>();
  new (S) SCEVSignExtendExpr(ID, Op, Ty);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getAnyExtendExpr - Return a SCEV for the given operand extended with
/// unspecified bits out to the given type.
const SCEV *ScalarEvolution::getAnyExtendExpr(const SCEV *Op,
                                              const Type *Ty) {
  assert(getTypeSizeInBits(Op->getType()) < getTypeSizeInBits(Ty) &&
         "This is not an extending conversion!");
  assert(isSCEVable(Ty) &&
         "This is not a conversion to a SCEVable type!");
  Ty = getEffectiveSCEVType(Ty);

  // Sign-extend negative constants.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(Op))
    if (SC->getValue()->getValue().isNegative())
      return getSignExtendExpr(Op, Ty);

  // Peel off a truncate cast.
  if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Op)) {
    const SCEV *NewOp = T->getOperand();
    if (getTypeSizeInBits(NewOp->getType()) < getTypeSizeInBits(Ty))
      return getAnyExtendExpr(NewOp, Ty);
    return getTruncateOrNoop(NewOp, Ty);
  }

  // Next try a zext cast. If the cast is folded, use it.
  const SCEV *ZExt = getZeroExtendExpr(Op, Ty);
  if (!isa<SCEVZeroExtendExpr>(ZExt))
    return ZExt;

  // Next try a sext cast. If the cast is folded, use it.
  const SCEV *SExt = getSignExtendExpr(Op, Ty);
  if (!isa<SCEVSignExtendExpr>(SExt))
    return SExt;

  // If the expression is obviously signed, use the sext cast value.
  if (isa<SCEVSMaxExpr>(Op))
    return SExt;

  // Absent any other information, use the zext cast value.
  return ZExt;
}

/// CollectAddOperandsWithScales - Process the given Ops list, which is
/// a list of operands to be added under the given scale, update the given
/// map. This is a helper function for getAddExpr. As an example of
/// what it does, given a sequence of operands that would form an add
/// expression like this:
///
///    m + n + 13 + (A * (o + p + (B * q + m + 29))) + r + (-1 * r)
///
/// where A and B are constants, update the map with these values:
///
///    (m, 1+A*B), (n, 1), (o, A), (p, A), (q, A*B), (r, 0)
///
/// and add 13 + A*B*29 to AccumulatedConstant.
/// This will allow getAddExpr to produce this:
///
///    13+A*B*29 + n + (m * (1+A*B)) + ((o + p) * A) + (q * A*B)
///
/// This form often exposes folding opportunities that are hidden in
/// the original operand list.
///
/// Return true iff it appears that any interesting folding opportunities
/// may be exposed. This helps getAddExpr short-circuit extra work in
/// the common case where no interesting opportunities are present, and
/// is also used as a check to avoid infinite recursion.
///
static bool
CollectAddOperandsWithScales(DenseMap<const SCEV *, APInt> &M,
                             SmallVector<const SCEV *, 8> &NewOps,
                             APInt &AccumulatedConstant,
                             const SmallVectorImpl<const SCEV *> &Ops,
                             const APInt &Scale,
                             ScalarEvolution &SE) {
  bool Interesting = false;

  // Iterate over the add operands.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[i]);
    if (Mul && isa<SCEVConstant>(Mul->getOperand(0))) {
      APInt NewScale =
        Scale * cast<SCEVConstant>(Mul->getOperand(0))->getValue()->getValue();
      if (Mul->getNumOperands() == 2 && isa<SCEVAddExpr>(Mul->getOperand(1))) {
        // A multiplication of a constant with another add; recurse.
        Interesting |=
          CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                       cast<SCEVAddExpr>(Mul->getOperand(1))
                                         ->getOperands(),
                                       NewScale, SE);
      } else {
        // A multiplication of a constant with some other value. Update
        // the map.
        SmallVector<const SCEV *, 4> MulOps(Mul->op_begin()+1, Mul->op_end());
        const SCEV *Key = SE.getMulExpr(MulOps);
        std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
          M.insert(std::make_pair(Key, NewScale));
        if (Pair.second) {
          NewOps.push_back(Pair.first->first);
        } else {
          Pair.first->second += NewScale;
          // The map already had an entry for this value, which may indicate
          // a folding opportunity.
          Interesting = true;
        }
      }
    } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
      // Pull a buried constant out to the outside.
      if (Scale != 1 || AccumulatedConstant != 0 || C->isZero())
        Interesting = true;
      AccumulatedConstant += Scale * C->getValue()->getValue();
    } else {
      // An ordinary operand. Update the map.
      std::pair<DenseMap<const SCEV *, APInt>::iterator, bool> Pair =
        M.insert(std::make_pair(Ops[i], Scale));
      if (Pair.second) {
        NewOps.push_back(Pair.first->first);
      } else {
        Pair.first->second += Scale;
        // The map already had an entry for this value, which may indicate
        // a folding opportunity.
        Interesting = true;
      }
    }
  }

  return Interesting;
}

namespace {
  struct APIntCompare {
    bool operator()(const APInt &LHS, const APInt &RHS) const {
      return LHS.ult(RHS);
    }
  };
}

/// getAddExpr - Get a canonical add expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getAddExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty add!");
  if (Ops.size() == 1) return Ops[0];
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVAddExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
    ++Idx;
    assert(Idx < Ops.size());
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      Ops[0] = getConstant(LHSC->getValue()->getValue() +
                           RHSC->getValue()->getValue());
      if (Ops.size() == 2) return Ops[0];
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant zero being added, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      Ops.erase(Ops.begin());
      --Idx;
    }
  }

  if (Ops.size() == 1) return Ops[0];

  // Okay, check to see if the same value occurs in the operand list twice.  If
  // so, merge them together into a multiply expression.  Since we sorted the
  // list, these values are required to be adjacent.
  const Type *Ty = Ops[0]->getType();
  for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
    if (Ops[i] == Ops[i+1]) {      //  X + Y + Y  -->  X + Y*2
      // Found a match, merge the two values into a multiply, and add any
      // remaining values to the result.
      const SCEV *Two = getIntegerSCEV(2, Ty);
      const SCEV *Mul = getMulExpr(Ops[i], Two);
      if (Ops.size() == 2)
        return Mul;
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      Ops.push_back(Mul);
      return getAddExpr(Ops);
    }

  // Check for truncates. If all the operands are truncated from the same
  // type, see if factoring out the truncate would permit the result to be
  // folded. eg., trunc(x) + m*trunc(n) --> trunc(x + trunc(m)*n)
  // if the contents of the resulting outer trunc fold to something simple.
  for (; Idx < Ops.size() && isa<SCEVTruncateExpr>(Ops[Idx]); ++Idx) {
    const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
    const Type *DstType = Trunc->getType();
    const Type *SrcType = Trunc->getOperand()->getType();
    SmallVector<const SCEV *, 8> LargeOps;
    bool Ok = true;
    // Check all the operands to see if they can be represented in the
    // source type of the truncate.
    for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
      if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(Ops[i])) {
        if (T->getOperand()->getType() != SrcType) {
          Ok = false;
          break;
        }
        LargeOps.push_back(T->getOperand());
      } else if (const SCEVConstant *C = dyn_cast<SCEVConstant>(Ops[i])) {
        // This could be either sign or zero extension, but sign extension
        // is much more likely to be foldable here.
        LargeOps.push_back(getSignExtendExpr(C, SrcType));
      } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
        SmallVector<const SCEV *, 8> LargeMulOps;
        for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
          if (const SCEVTruncateExpr *T =
                dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
            if (T->getOperand()->getType() != SrcType) {
              Ok = false;
              break;
            }
            LargeMulOps.push_back(T->getOperand());
          } else if (const SCEVConstant *C =
                       dyn_cast<SCEVConstant>(M->getOperand(j))) {
            // This could be either sign or zero extension, but sign extension
            // is much more likely to be foldable here.
            LargeMulOps.push_back(getSignExtendExpr(C, SrcType));
          } else {
            Ok = false;
            break;
          }
        }
        if (Ok)
          LargeOps.push_back(getMulExpr(LargeMulOps));
      } else {
        Ok = false;
        break;
      }
    }
    if (Ok) {
      // Evaluate the expression in the larger type.
      const SCEV *Fold = getAddExpr(LargeOps);
      // If it folds to something simple, use it. Otherwise, don't.
      if (isa<SCEVConstant>(Fold) || isa<SCEVUnknown>(Fold))
        return getTruncateExpr(Fold, DstType);
    }
  }

  // Skip past any other cast SCEVs.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddExpr)
    ++Idx;

  // If there are add operands they would be next.
  if (Idx < Ops.size()) {
    bool DeletedAdd = false;
    while (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[Idx])) {
      // If we have an add, expand the add operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Add->op_begin(), Add->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedAdd = true;
    }

    // If we deleted at least one add, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedAdd)
      return getAddExpr(Ops);
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  // Check to see if there are any folding opportunities present with
  // operands multiplied by constant values.
  if (Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx])) {
    uint64_t BitWidth = getTypeSizeInBits(Ty);
    DenseMap<const SCEV *, APInt> M;
    SmallVector<const SCEV *, 8> NewOps;
    APInt AccumulatedConstant(BitWidth, 0);
    if (CollectAddOperandsWithScales(M, NewOps, AccumulatedConstant,
                                     Ops, APInt(BitWidth, 1), *this)) {
      // Some interesting folding opportunity is present, so it's worthwhile to
      // re-generate the operands list. Group the operands by constant scale,
      // to avoid multiplying by the same constant scale multiple times.
      std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare> MulOpLists;
      for (SmallVector<const SCEV *, 8>::iterator I = NewOps.begin(),
           E = NewOps.end(); I != E; ++I)
        MulOpLists[M.find(*I)->second].push_back(*I);
      // Re-generate the operands list.
      Ops.clear();
      if (AccumulatedConstant != 0)
        Ops.push_back(getConstant(AccumulatedConstant));
      for (std::map<APInt, SmallVector<const SCEV *, 4>, APIntCompare>::iterator
           I = MulOpLists.begin(), E = MulOpLists.end(); I != E; ++I)
        if (I->first != 0)
          Ops.push_back(getMulExpr(getConstant(I->first),
                                   getAddExpr(I->second)));
      if (Ops.empty())
        return getIntegerSCEV(0, Ty);
      if (Ops.size() == 1)
        return Ops[0];
      return getAddExpr(Ops);
    }
  }

  // If we are adding something to a multiply expression, make sure the
  // something is not already an operand of the multiply.  If so, merge it into
  // the multiply.
  for (; Idx < Ops.size() && isa<SCEVMulExpr>(Ops[Idx]); ++Idx) {
    const SCEVMulExpr *Mul = cast<SCEVMulExpr>(Ops[Idx]);
    for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
      const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
      for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
          // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
          const SCEV *InnerMul = Mul->getOperand(MulOp == 0);
          if (Mul->getNumOperands() != 2) {
            // If the multiply has more than two operands, we must get the
            // Y*Z term.
            SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(), Mul->op_end());
            MulOps.erase(MulOps.begin()+MulOp);
            InnerMul = getMulExpr(MulOps);
          }
          const SCEV *One = getIntegerSCEV(1, Ty);
          const SCEV *AddOne = getAddExpr(InnerMul, One);
          const SCEV *OuterMul = getMulExpr(AddOne, Ops[AddOp]);
          if (Ops.size() == 2) return OuterMul;
          if (AddOp < Idx) {
            Ops.erase(Ops.begin()+AddOp);
            Ops.erase(Ops.begin()+Idx-1);
          } else {
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+AddOp-1);
          }
          Ops.push_back(OuterMul);
          return getAddExpr(Ops);
        }

      // Check this multiply against other multiplies being added together.
      for (unsigned OtherMulIdx = Idx+1;
           OtherMulIdx < Ops.size() && isa<SCEVMulExpr>(Ops[OtherMulIdx]);
           ++OtherMulIdx) {
        const SCEVMulExpr *OtherMul = cast<SCEVMulExpr>(Ops[OtherMulIdx]);
        // If MulOp occurs in OtherMul, we can fold the two multiplies
        // together.
        for (unsigned OMulOp = 0, e = OtherMul->getNumOperands();
             OMulOp != e; ++OMulOp)
          if (OtherMul->getOperand(OMulOp) == MulOpSCEV) {
            // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
            const SCEV *InnerMul1 = Mul->getOperand(MulOp == 0);
            if (Mul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(Mul->op_begin(),
                                                  Mul->op_end());
              MulOps.erase(MulOps.begin()+MulOp);
              InnerMul1 = getMulExpr(MulOps);
            }
            const SCEV *InnerMul2 = OtherMul->getOperand(OMulOp == 0);
            if (OtherMul->getNumOperands() != 2) {
              SmallVector<const SCEV *, 4> MulOps(OtherMul->op_begin(),
                                                  OtherMul->op_end());
              MulOps.erase(MulOps.begin()+OMulOp);
              InnerMul2 = getMulExpr(MulOps);
            }
            const SCEV *InnerMulSum = getAddExpr(InnerMul1,InnerMul2);
            const SCEV *OuterMul = getMulExpr(MulOpSCEV, InnerMulSum);
            if (Ops.size() == 2) return OuterMul;
            Ops.erase(Ops.begin()+Idx);
            Ops.erase(Ops.begin()+OtherMulIdx-1);
            Ops.push_back(OuterMul);
            return getAddExpr(Ops);
          }
      }
    }
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this add and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
      LIOps.push_back(AddRec->getStart());

      SmallVector<const SCEV *, 4> AddRecOps(AddRec->op_begin(),
                                             AddRec->op_end());
      AddRecOps[0] = getAddExpr(LIOps);

      const SCEV *NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, add the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getAddExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // added together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
          SmallVector<const SCEV *, 4> NewOps(AddRec->op_begin(),
                                              AddRec->op_end());
          for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
            if (i >= NewOps.size()) {
              NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
                            OtherAddRec->op_end());
              break;
            }
            NewOps[i] = getAddExpr(NewOps[i], OtherAddRec->getOperand(i));
          }
          const SCEV *NewAddRec = getAddRecExpr(NewOps, AddRec->getLoop());

          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getAddExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need an add expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scAddExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVAddExpr>();
  new (S) SCEVAddExpr(ID, Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}

/// getMulExpr - Get a canonical multiply expression, or something simpler if
/// possible.
const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops) {
  assert(!Ops.empty() && "Cannot get empty mul!");
#ifndef NDEBUG
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    assert(getEffectiveSCEVType(Ops[i]->getType()) ==
           getEffectiveSCEVType(Ops[0]->getType()) &&
           "SCEVMulExpr operand types don't match!");
#endif

  // Sort by complexity, this groups all similar expression types together.
  GroupByComplexity(Ops, LI);

  // If there are any constants, fold them together.
  unsigned Idx = 0;
  if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {

    // C1*(C2+V) -> C1*C2 + C1*V
    if (Ops.size() == 2)
      if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(Ops[1]))
        if (Add->getNumOperands() == 2 &&
            isa<SCEVConstant>(Add->getOperand(0)))
          return getAddExpr(getMulExpr(LHSC, Add->getOperand(0)),
                            getMulExpr(LHSC, Add->getOperand(1)));

    ++Idx;
    while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
      // We found two constants, fold them together!
      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() *
                                           RHSC->getValue()->getValue());
      Ops[0] = getConstant(Fold);
      Ops.erase(Ops.begin()+1);  // Erase the folded element
      if (Ops.size() == 1) return Ops[0];
      LHSC = cast<SCEVConstant>(Ops[0]);
    }

    // If we are left with a constant one being multiplied, strip it off.
    if (cast<SCEVConstant>(Ops[0])->getValue()->equalsInt(1)) {
      Ops.erase(Ops.begin());
      --Idx;
    } else if (cast<SCEVConstant>(Ops[0])->getValue()->isZero()) {
      // If we have a multiply of zero, it will always be zero.
      return Ops[0];
    }
  }

  // Skip over the add expression until we get to a multiply.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scMulExpr)
    ++Idx;

  if (Ops.size() == 1)
    return Ops[0];

  // If there are mul operands inline them all into this expression.
  if (Idx < Ops.size()) {
    bool DeletedMul = false;
    while (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(Ops[Idx])) {
      // If we have a mul, expand the mul operands onto the end of the operands
      // list.
      Ops.insert(Ops.end(), Mul->op_begin(), Mul->op_end());
      Ops.erase(Ops.begin()+Idx);
      DeletedMul = true;
    }

    // If we deleted at least one mul, we added operands to the end of the list,
    // and they are not necessarily sorted.  Recurse to resort and resimplify
    // any operands we just acquired.
    if (DeletedMul)
      return getMulExpr(Ops);
  }

  // If there are any add recurrences in the operands list, see if any other
  // added values are loop invariant.  If so, we can fold them into the
  // recurrence.
  while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scAddRecExpr)
    ++Idx;

  // Scan over all recurrences, trying to fold loop invariants into them.
  for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
    // Scan all of the other operands to this mul and add them to the vector if
    // they are loop invariant w.r.t. the recurrence.
    SmallVector<const SCEV *, 8> LIOps;
    const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
    for (unsigned i = 0, e = Ops.size(); i != e; ++i)
      if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
        LIOps.push_back(Ops[i]);
        Ops.erase(Ops.begin()+i);
        --i; --e;
      }

    // If we found some loop invariants, fold them into the recurrence.
    if (!LIOps.empty()) {
      //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
      SmallVector<const SCEV *, 4> NewOps;
      NewOps.reserve(AddRec->getNumOperands());
      if (LIOps.size() == 1) {
        const SCEV *Scale = LIOps[0];
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
          NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
      } else {
        for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
          SmallVector<const SCEV *, 4> MulOps(LIOps.begin(), LIOps.end());
          MulOps.push_back(AddRec->getOperand(i));
          NewOps.push_back(getMulExpr(MulOps));
        }
      }

      const SCEV *NewRec = getAddRecExpr(NewOps, AddRec->getLoop());

      // If all of the other operands were loop invariant, we are done.
      if (Ops.size() == 1) return NewRec;

      // Otherwise, multiply the folded AddRec by the non-liv parts.
      for (unsigned i = 0;; ++i)
        if (Ops[i] == AddRec) {
          Ops[i] = NewRec;
          break;
        }
      return getMulExpr(Ops);
    }

    // Okay, if there weren't any loop invariants to be folded, check to see if
    // there are multiple AddRec's with the same loop induction variable being
    // multiplied together.  If so, we can fold them.
    for (unsigned OtherIdx = Idx+1;
         OtherIdx < Ops.size() && isa<SCEVAddRecExpr>(Ops[OtherIdx]);++OtherIdx)
      if (OtherIdx != Idx) {
        const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
        if (AddRec->getLoop() == OtherAddRec->getLoop()) {
          // F * G  -->  {A,+,B} * {C,+,D}  -->  {A*C,+,F*D + G*B + B*D}
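          // (To see why, note that if F(n+1) = F(n) + B and
          // G(n+1) = G(n) + D, then F(n+1)*G(n+1) expands to
          // F(n)*G(n) + F(n)*D + G(n)*B + B*D, so the product recurrence
          // advances by F*D + G*B + B*D each iteration.)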
          const SCEVAddRecExpr *F = AddRec, *G = OtherAddRec;
          const SCEV *NewStart = getMulExpr(F->getStart(),
                                            G->getStart());
          const SCEV *B = F->getStepRecurrence(*this);
          const SCEV *D = G->getStepRecurrence(*this);
          const SCEV *NewStep = getAddExpr(getMulExpr(F, D),
                                           getMulExpr(G, B),
                                           getMulExpr(B, D));
          const SCEV *NewAddRec = getAddRecExpr(NewStart, NewStep,
                                                F->getLoop());
          if (Ops.size() == 2) return NewAddRec;

          Ops.erase(Ops.begin()+Idx);
          Ops.erase(Ops.begin()+OtherIdx-1);
          Ops.push_back(NewAddRec);
          return getMulExpr(Ops);
        }
      }

    // Otherwise couldn't fold anything into this recurrence.  Move onto the
    // next one.
  }

  // Okay, it looks like we really DO need a mul expr.  Check to see if we
  // already have one, otherwise create a new one.
  FoldingSetNodeID ID;
  ID.AddInteger(scMulExpr);
  ID.AddInteger(Ops.size());
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    ID.AddPointer(Ops[i]);
  void *IP = 0;
  if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
  SCEV *S = SCEVAllocator.Allocate<SCEVMulExpr>();
  new (S) SCEVMulExpr(ID, Ops);
  UniqueSCEVs.InsertNode(S, IP);
  return S;
}
1583 /// getUDivExpr - Get a canonical multiply expression, or something simpler if
1585 const SCEV *ScalarEvolution::getUDivExpr(const SCEV *LHS,
1587 assert(getEffectiveSCEVType(LHS->getType()) ==
1588 getEffectiveSCEVType(RHS->getType()) &&
1589 "SCEVUDivExpr operand types don't match!");
1591 if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS)) {
1592 if (RHSC->getValue()->equalsInt(1))
1593 return LHS; // X udiv 1 --> X
1594 if (RHSC->isZero())
1595 return getIntegerSCEV(0, LHS->getType()); // value is undefined
1597 // Determine if the division can be folded into the operands of
1598 // its operands.
1599 // TODO: Generalize this to non-constants by using known-bits information.
1600 const Type *Ty = LHS->getType();
1601 unsigned LZ = RHSC->getValue()->getValue().countLeadingZeros();
1602 unsigned MaxShiftAmt = getTypeSizeInBits(Ty) - LZ;
1603 // For non-power-of-two values, effectively round the value up to the
1604 // nearest power of two.
1605 if (!RHSC->getValue()->getValue().isPowerOf2())
1606 ++MaxShiftAmt;
1607 const IntegerType *ExtTy =
1608 IntegerType::get(getTypeSizeInBits(Ty) + MaxShiftAmt);
1609 // {X,+,N}/C --> {X/C,+,N/C} if safe and N/C can be folded.
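// Illustrative example: {0,+,4}/2 folds to {0,+,2}, since the step 4 is
// divisible by 2 and the zero-extend comparison below shows the addrec
// cannot wrap, which is what makes dividing each operand separately safe.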
1610 if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS))
1611 if (const SCEVConstant *Step =
1612 dyn_cast<SCEVConstant>(AR->getStepRecurrence(*this)))
1613 if (!Step->getValue()->getValue()
1614 .urem(RHSC->getValue()->getValue()) &&
1615 getZeroExtendExpr(AR, ExtTy) ==
1616 getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
1617 getZeroExtendExpr(Step, ExtTy),
1618 AR->getLoop())) {
1619 SmallVector<const SCEV *, 4> Operands;
1620 for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
1621 Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
1622 return getAddRecExpr(Operands, AR->getLoop());
1623 }
1624 // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
1625 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
1626 SmallVector<const SCEV *, 4> Operands;
1627 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
1628 Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
1629 if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
1630 // Find an operand that's safely divisible.
1631 for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i) {
1632 const SCEV *Op = M->getOperand(i);
1633 const SCEV *Div = getUDivExpr(Op, RHSC);
1634 if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
1635 const SmallVectorImpl<const SCEV *> &MOperands = M->getOperands();
1636 Operands = SmallVector<const SCEV *, 4>(MOperands.begin(),
1637 MOperands.end());
1638 Operands[i] = Div;
1639 return getMulExpr(Operands);
1640 }
1641 }
1642 }
1643 // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
1644 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(LHS)) {
1645 SmallVector<const SCEV *, 4> Operands;
1646 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
1647 Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
1648 if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
1649 Operands.clear();
1650 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
1651 const SCEV *Op = getUDivExpr(A->getOperand(i), RHS);
1652 if (isa<SCEVUDivExpr>(Op) || getMulExpr(Op, RHS) != A->getOperand(i))
1653 break;
1654 Operands.push_back(Op);
1655 }
1656 if (Operands.size() == A->getNumOperands())
1657 return getAddExpr(Operands);
1658 }
1659 }
1661 // Fold if both operands are constant.
1662 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(LHS)) {
1663 Constant *LHSCV = LHSC->getValue();
1664 Constant *RHSCV = RHSC->getValue();
1665 return getConstant(cast<ConstantInt>(ConstantExpr::getUDiv(LHSCV,
1666 RHSCV)));
1667 }
1668 }
1670 FoldingSetNodeID ID;
1671 ID.AddInteger(scUDivExpr);
1672 ID.AddPointer(LHS);
1673 ID.AddPointer(RHS);
1674 void *IP = 0;
1675 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1676 SCEV *S = SCEVAllocator.Allocate<SCEVUDivExpr>();
1677 new (S) SCEVUDivExpr(ID, LHS, RHS);
1678 UniqueSCEVs.InsertNode(S, IP);
1679 return S;
1680 }
1683 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1684 /// Simplify the expression as much as possible.
1685 const SCEV *ScalarEvolution::getAddRecExpr(const SCEV *Start,
1686 const SCEV *Step, const Loop *L) {
1687 SmallVector<const SCEV *, 4> Operands;
1688 Operands.push_back(Start);
1689 if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
1690 if (StepChrec->getLoop() == L) {
1691 Operands.insert(Operands.end(), StepChrec->op_begin(),
1692 StepChrec->op_end());
1693 return getAddRecExpr(Operands, L);
1694 }
1696 Operands.push_back(Step);
1697 return getAddRecExpr(Operands, L);
1698 }
1700 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
1701 /// Simplify the expression as much as possible.
1702 const SCEV *
1703 ScalarEvolution::getAddRecExpr(SmallVectorImpl<const SCEV *> &Operands,
1704 const Loop *L) {
1705 if (Operands.size() == 1) return Operands[0];
1706 #ifndef NDEBUG
1707 for (unsigned i = 1, e = Operands.size(); i != e; ++i)
1708 assert(getEffectiveSCEVType(Operands[i]->getType()) ==
1709 getEffectiveSCEVType(Operands[0]->getType()) &&
1710 "SCEVAddRecExpr operand types don't match!");
1713 if (Operands.back()->isZero()) {
1714 Operands.pop_back();
1715 return getAddRecExpr(Operands, L); // {X,+,0} --> X
1716 }
1718 // Canonicalize nested AddRecs by nesting them in order of loop depth.
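// Illustrative example: a request for {{S,+,X}<inner>,+,Y}<outer>, where the
// outer loop is shallower than the inner one, is re-nested (when the operands
// stay loop-invariant where required) as {{S,+,Y}<outer>,+,X}<inner>, so the
// outermost recurrence always belongs to the shallowest loop.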
1719 if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
1720 const Loop* NestedLoop = NestedAR->getLoop();
1721 if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
1722 SmallVector<const SCEV *, 4> NestedOperands(NestedAR->op_begin(),
1723 NestedAR->op_end());
1724 Operands[0] = NestedAR->getStart();
1725 // AddRecs require their operands be loop-invariant with respect to their
1726 // loops. Don't perform this transformation if it would break this
1727 // requirement.
1728 bool AllInvariant = true;
1729 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1730 if (!Operands[i]->isLoopInvariant(L)) {
1731 AllInvariant = false;
1732 break;
1733 }
1734 if (AllInvariant) {
1735 NestedOperands[0] = getAddRecExpr(Operands, L);
1736 AllInvariant = true;
1737 for (unsigned i = 0, e = NestedOperands.size(); i != e; ++i)
1738 if (!NestedOperands[i]->isLoopInvariant(NestedLoop)) {
1739 AllInvariant = false;
1740 break;
1741 }
1742 if (AllInvariant)
1743 // Ok, both add recurrences are valid after the transformation.
1744 return getAddRecExpr(NestedOperands, NestedLoop);
1745 }
1746 // Reset Operands to its original state.
1747 Operands[0] = NestedAR;
1748 }
1749 }
1751 FoldingSetNodeID ID;
1752 ID.AddInteger(scAddRecExpr);
1753 ID.AddInteger(Operands.size());
1754 for (unsigned i = 0, e = Operands.size(); i != e; ++i)
1755 ID.AddPointer(Operands[i]);
1756 ID.AddPointer(L);
1757 void *IP = 0;
1758 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1759 SCEV *S = SCEVAllocator.Allocate<SCEVAddRecExpr>();
1760 new (S) SCEVAddRecExpr(ID, Operands, L);
1761 UniqueSCEVs.InsertNode(S, IP);
1762 return S;
1763 }
1765 const SCEV *ScalarEvolution::getSMaxExpr(const SCEV *LHS,
1766 const SCEV *RHS) {
1767 SmallVector<const SCEV *, 2> Ops;
1768 Ops.push_back(LHS);
1769 Ops.push_back(RHS);
1770 return getSMaxExpr(Ops);
1771 }
1773 const SCEV *
1774 ScalarEvolution::getSMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1775 assert(!Ops.empty() && "Cannot get empty smax!");
1776 if (Ops.size() == 1) return Ops[0];
1777 #ifndef NDEBUG
1778 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1779 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1780 getEffectiveSCEVType(Ops[0]->getType()) &&
1781 "SCEVSMaxExpr operand types don't match!");
1784 // Sort by complexity, this groups all similar expression types together.
1785 GroupByComplexity(Ops, LI);
1787 // If there are any constants, fold them together.
1788 unsigned Idx = 0;
1789 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1790 ++Idx;
1791 assert(Idx < Ops.size());
1792 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1793 // We found two constants, fold them together!
1794 ConstantInt *Fold = ConstantInt::get(
1795 APIntOps::smax(LHSC->getValue()->getValue(),
1796 RHSC->getValue()->getValue()));
1797 Ops[0] = getConstant(Fold);
1798 Ops.erase(Ops.begin()+1); // Erase the folded element
1799 if (Ops.size() == 1) return Ops[0];
1800 LHSC = cast<SCEVConstant>(Ops[0]);
1801 }
1803 // If we are left with a constant minimum-int, strip it off.
1804 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(true)) {
1805 Ops.erase(Ops.begin());
1806 --Idx;
1807 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(true)) {
1808 // If we have an smax with a constant maximum-int, it will always be
1809 // the maximum value.
1810 return Ops[0];
1811 }
1812 }
1814 if (Ops.size() == 1) return Ops[0];
1816 // Find the first SMax
1817 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scSMaxExpr)
1818 ++Idx;
1820 // Check to see if one of the operands is an SMax. If so, expand its operands
1821 // onto our operand list, and recurse to simplify.
1822 if (Idx < Ops.size()) {
1823 bool DeletedSMax = false;
1824 while (const SCEVSMaxExpr *SMax = dyn_cast<SCEVSMaxExpr>(Ops[Idx])) {
1825 Ops.insert(Ops.end(), SMax->op_begin(), SMax->op_end());
1826 Ops.erase(Ops.begin()+Idx);
1827 DeletedSMax = true;
1828 }
1830 if (DeletedSMax)
1831 return getSMaxExpr(Ops);
1832 }
1834 // Okay, check to see if the same value occurs in the operand list twice. If
1835 // so, delete one. Since we sorted the list, these values are required to
1836 // be adjacent.
1837 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1838 if (Ops[i] == Ops[i+1]) { // X smax Y smax Y --> X smax Y
1839 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1840 --i; --e;
1841 }
1843 if (Ops.size() == 1) return Ops[0];
1845 assert(!Ops.empty() && "Reduced smax down to nothing!");
1847 // Okay, it looks like we really DO need an smax expr. Check to see if we
1848 // already have one, otherwise create a new one.
1849 FoldingSetNodeID ID;
1850 ID.AddInteger(scSMaxExpr);
1851 ID.AddInteger(Ops.size());
1852 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1853 ID.AddPointer(Ops[i]);
1854 void *IP = 0;
1855 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1856 SCEV *S = SCEVAllocator.Allocate<SCEVSMaxExpr>();
1857 new (S) SCEVSMaxExpr(ID, Ops);
1858 UniqueSCEVs.InsertNode(S, IP);
1859 return S;
1860 }
1862 const SCEV *ScalarEvolution::getUMaxExpr(const SCEV *LHS,
1863 const SCEV *RHS) {
1864 SmallVector<const SCEV *, 2> Ops;
1865 Ops.push_back(LHS);
1866 Ops.push_back(RHS);
1867 return getUMaxExpr(Ops);
1868 }
1870 const SCEV *
1871 ScalarEvolution::getUMaxExpr(SmallVectorImpl<const SCEV *> &Ops) {
1872 assert(!Ops.empty() && "Cannot get empty umax!");
1873 if (Ops.size() == 1) return Ops[0];
1874 #ifndef NDEBUG
1875 for (unsigned i = 1, e = Ops.size(); i != e; ++i)
1876 assert(getEffectiveSCEVType(Ops[i]->getType()) ==
1877 getEffectiveSCEVType(Ops[0]->getType()) &&
1878 "SCEVUMaxExpr operand types don't match!");
1881 // Sort by complexity, this groups all similar expression types together.
1882 GroupByComplexity(Ops, LI);
1884 // If there are any constants, fold them together.
1885 unsigned Idx = 0;
1886 if (const SCEVConstant *LHSC = dyn_cast<SCEVConstant>(Ops[0])) {
1887 ++Idx;
1888 assert(Idx < Ops.size());
1889 while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
1890 // We found two constants, fold them together!
1891 ConstantInt *Fold = ConstantInt::get(
1892 APIntOps::umax(LHSC->getValue()->getValue(),
1893 RHSC->getValue()->getValue()));
1894 Ops[0] = getConstant(Fold);
1895 Ops.erase(Ops.begin()+1); // Erase the folded element
1896 if (Ops.size() == 1) return Ops[0];
1897 LHSC = cast<SCEVConstant>(Ops[0]);
1898 }
1900 // If we are left with a constant minimum-int, strip it off.
1901 if (cast<SCEVConstant>(Ops[0])->getValue()->isMinValue(false)) {
1902 Ops.erase(Ops.begin());
1903 --Idx;
1904 } else if (cast<SCEVConstant>(Ops[0])->getValue()->isMaxValue(false)) {
1905 // If we have an umax with a constant maximum-int, it will always be
1906 // the maximum value.
1907 return Ops[0];
1908 }
1909 }
1911 if (Ops.size() == 1) return Ops[0];
1913 // Find the first UMax
1914 while (Idx < Ops.size() && Ops[Idx]->getSCEVType() < scUMaxExpr)
1915 ++Idx;
1917 // Check to see if one of the operands is a UMax. If so, expand its operands
1918 // onto our operand list, and recurse to simplify.
1919 if (Idx < Ops.size()) {
1920 bool DeletedUMax = false;
1921 while (const SCEVUMaxExpr *UMax = dyn_cast<SCEVUMaxExpr>(Ops[Idx])) {
1922 Ops.insert(Ops.end(), UMax->op_begin(), UMax->op_end());
1923 Ops.erase(Ops.begin()+Idx);
1924 DeletedUMax = true;
1925 }
1927 if (DeletedUMax)
1928 return getUMaxExpr(Ops);
1929 }
1931 // Okay, check to see if the same value occurs in the operand list twice. If
1932 // so, delete one. Since we sorted the list, these values are required to
1933 // be adjacent.
1934 for (unsigned i = 0, e = Ops.size()-1; i != e; ++i)
1935 if (Ops[i] == Ops[i+1]) { // X umax Y umax Y --> X umax Y
1936 Ops.erase(Ops.begin()+i, Ops.begin()+i+1);
1937 --i; --e;
1938 }
1940 if (Ops.size() == 1) return Ops[0];
1942 assert(!Ops.empty() && "Reduced umax down to nothing!");
1944 // Okay, it looks like we really DO need a umax expr. Check to see if we
1945 // already have one, otherwise create a new one.
1946 FoldingSetNodeID ID;
1947 ID.AddInteger(scUMaxExpr);
1948 ID.AddInteger(Ops.size());
1949 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1950 ID.AddPointer(Ops[i]);
1951 void *IP = 0;
1952 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1953 SCEV *S = SCEVAllocator.Allocate<SCEVUMaxExpr>();
1954 new (S) SCEVUMaxExpr(ID, Ops);
1955 UniqueSCEVs.InsertNode(S, IP);
1956 return S;
1957 }
1959 const SCEV *ScalarEvolution::getSMinExpr(const SCEV *LHS,
1960 const SCEV *RHS) {
1961 // ~smax(~x, ~y) == smin(x, y).
1962 return getNotSCEV(getSMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1963 }
1965 const SCEV *ScalarEvolution::getUMinExpr(const SCEV *LHS,
1966 const SCEV *RHS) {
1967 // ~umax(~x, ~y) == umin(x, y)
1968 return getNotSCEV(getUMaxExpr(getNotSCEV(LHS), getNotSCEV(RHS)));
1969 }
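// Both identities hold because bitwise-not is an order-reversing bijection:
// ~x = -1-x, so x <= y exactly when ~y <= ~x (in the matching signedness),
// and taking the max of complemented operands selects the complement of the
// minimum operand.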
1971 const SCEV *ScalarEvolution::getUnknown(Value *V) {
1972 // Don't attempt to do anything other than create a SCEVUnknown object
1973 // here. createSCEV only calls getUnknown after checking for all other
1974 // interesting possibilities, and any other code that calls getUnknown
1975 // is doing so in order to hide a value from SCEV canonicalization.
1977 FoldingSetNodeID ID;
1978 ID.AddInteger(scUnknown);
1979 ID.AddPointer(V);
1980 void *IP = 0;
1981 if (const SCEV *S = UniqueSCEVs.FindNodeOrInsertPos(ID, IP)) return S;
1982 SCEV *S = SCEVAllocator.Allocate<SCEVUnknown>();
1983 new (S) SCEVUnknown(ID, V);
1984 UniqueSCEVs.InsertNode(S, IP);
1985 return S;
1986 }
1988 //===----------------------------------------------------------------------===//
1989 // Basic SCEV Analysis and PHI Idiom Recognition Code
1990 //===----------------------------------------------------------------------===//
1992 /// isSCEVable - Test if values of the given type are analyzable within
1993 /// the SCEV framework. This primarily includes integer types, and it
1994 /// can optionally include pointer types if the ScalarEvolution class
1995 /// has access to target-specific information.
1996 bool ScalarEvolution::isSCEVable(const Type *Ty) const {
1997 // Integers are always SCEVable.
1998 if (Ty->isInteger())
1999 return true;
2001 // Pointers are SCEVable if TargetData information is available
2002 // to provide pointer size information.
2003 if (isa<PointerType>(Ty))
2004 return TD != NULL;
2006 // Otherwise it's not SCEVable.
2007 return false;
2008 }
2010 /// getTypeSizeInBits - Return the size in bits of the specified type,
2011 /// for which isSCEVable must return true.
2012 uint64_t ScalarEvolution::getTypeSizeInBits(const Type *Ty) const {
2013 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2015 // If we have a TargetData, use it!
2016 if (TD)
2017 return TD->getTypeSizeInBits(Ty);
2019 // Otherwise, we support only integer types.
2020 assert(Ty->isInteger() && "isSCEVable permitted a non-SCEVable type!");
2021 return Ty->getPrimitiveSizeInBits();
2022 }
2024 /// getEffectiveSCEVType - Return a type with the same bitwidth as
2025 /// the given type and which represents how SCEV will treat the given
2026 /// type, for which isSCEVable must return true. For pointer types,
2027 /// this is the pointer-sized integer type.
2028 const Type *ScalarEvolution::getEffectiveSCEVType(const Type *Ty) const {
2029 assert(isSCEVable(Ty) && "Type is not SCEVable!");
2031 if (Ty->isInteger())
2032 return Ty;
2034 assert(isa<PointerType>(Ty) && "Unexpected non-pointer non-integer type!");
2035 return TD->getIntPtrType();
2036 }
2038 const SCEV *ScalarEvolution::getCouldNotCompute() {
2039 return &CouldNotCompute;
2040 }
2042 /// getSCEV - Return an existing SCEV if it exists, otherwise analyze the
2043 /// expression and create a new one.
2044 const SCEV *ScalarEvolution::getSCEV(Value *V) {
2045 assert(isSCEVable(V->getType()) && "Value is not SCEVable!");
2047 std::map<SCEVCallbackVH, const SCEV *>::iterator I = Scalars.find(V);
2048 if (I != Scalars.end()) return I->second;
2049 const SCEV *S = createSCEV(V);
2050 Scalars.insert(std::make_pair(SCEVCallbackVH(V, this), S));
2051 return S;
2052 }
2054 /// getIntegerSCEV - Given a SCEVable type, create a constant for the
2055 /// specified signed integer value and return a SCEV for the constant.
2056 const SCEV *ScalarEvolution::getIntegerSCEV(int Val, const Type *Ty) {
2057 const IntegerType *ITy = cast<IntegerType>(getEffectiveSCEVType(Ty));
2058 return getConstant(ConstantInt::get(ITy, Val));
2059 }
2061 /// getNegativeSCEV - Return a SCEV corresponding to -V = -1*V
2063 const SCEV *ScalarEvolution::getNegativeSCEV(const SCEV *V) {
2064 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2065 return getConstant(
2066 cast<ConstantInt>(Context->getConstantExprNeg(VC->getValue())));
2068 const Type *Ty = V->getType();
2069 Ty = getEffectiveSCEVType(Ty);
2070 return getMulExpr(V, getConstant(ConstantInt::getAllOnesValue(Ty)));
2071 }
2073 /// getNotSCEV - Return a SCEV corresponding to ~V = -1-V
2074 const SCEV *ScalarEvolution::getNotSCEV(const SCEV *V) {
2075 if (const SCEVConstant *VC = dyn_cast<SCEVConstant>(V))
2076 return getConstant(cast<ConstantInt>(ConstantExpr::getNot(VC->getValue())));
2078 const Type *Ty = V->getType();
2079 Ty = getEffectiveSCEVType(Ty);
2080 const SCEV *AllOnes = getConstant(ConstantInt::getAllOnesValue(Ty));
2081 return getMinusSCEV(AllOnes, V);
2082 }
2084 /// getMinusSCEV - Return a SCEV corresponding to LHS - RHS.
2086 const SCEV *ScalarEvolution::getMinusSCEV(const SCEV *LHS,
2087 const SCEV *RHS) {
2088 // X - Y --> X + -Y
2089 return getAddExpr(LHS, getNegativeSCEV(RHS));
2090 }
2092 /// getTruncateOrZeroExtend - Return a SCEV corresponding to a conversion of the
2093 /// input value to the specified type. If the type must be extended, it is zero
2094 /// extended.
2095 const SCEV *
2096 ScalarEvolution::getTruncateOrZeroExtend(const SCEV *V,
2097 const Type *Ty) {
2098 const Type *SrcTy = V->getType();
2099 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2100 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2101 "Cannot truncate or zero extend with non-integer arguments!");
2102 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2103 return V; // No conversion
2104 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2105 return getTruncateExpr(V, Ty);
2106 return getZeroExtendExpr(V, Ty);
2107 }
2109 /// getTruncateOrSignExtend - Return a SCEV corresponding to a conversion of the
2110 /// input value to the specified type. If the type must be extended, it is sign
2111 /// extended.
2112 const SCEV *
2113 ScalarEvolution::getTruncateOrSignExtend(const SCEV *V,
2114 const Type *Ty) {
2115 const Type *SrcTy = V->getType();
2116 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2117 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2118 "Cannot truncate or zero extend with non-integer arguments!");
2119 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2120 return V; // No conversion
2121 if (getTypeSizeInBits(SrcTy) > getTypeSizeInBits(Ty))
2122 return getTruncateExpr(V, Ty);
2123 return getSignExtendExpr(V, Ty);
2124 }
2126 /// getNoopOrZeroExtend - Return a SCEV corresponding to a conversion of the
2127 /// input value to the specified type. If the type must be extended, it is zero
2128 /// extended. The conversion must not be narrowing.
2129 const SCEV *
2130 ScalarEvolution::getNoopOrZeroExtend(const SCEV *V, const Type *Ty) {
2131 const Type *SrcTy = V->getType();
2132 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2133 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2134 "Cannot noop or zero extend with non-integer arguments!");
2135 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2136 "getNoopOrZeroExtend cannot truncate!");
2137 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2138 return V; // No conversion
2139 return getZeroExtendExpr(V, Ty);
2140 }
2142 /// getNoopOrSignExtend - Return a SCEV corresponding to a conversion of the
2143 /// input value to the specified type. If the type must be extended, it is sign
2144 /// extended. The conversion must not be narrowing.
2145 const SCEV *
2146 ScalarEvolution::getNoopOrSignExtend(const SCEV *V, const Type *Ty) {
2147 const Type *SrcTy = V->getType();
2148 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2149 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2150 "Cannot noop or sign extend with non-integer arguments!");
2151 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2152 "getNoopOrSignExtend cannot truncate!");
2153 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2154 return V; // No conversion
2155 return getSignExtendExpr(V, Ty);
2156 }
2158 /// getNoopOrAnyExtend - Return a SCEV corresponding to a conversion of
2159 /// the input value to the specified type. If the type must be extended,
2160 /// it is extended with unspecified bits. The conversion must not be
2161 /// narrowing.
2162 const SCEV *
2163 ScalarEvolution::getNoopOrAnyExtend(const SCEV *V, const Type *Ty) {
2164 const Type *SrcTy = V->getType();
2165 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2166 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2167 "Cannot noop or any extend with non-integer arguments!");
2168 assert(getTypeSizeInBits(SrcTy) <= getTypeSizeInBits(Ty) &&
2169 "getNoopOrAnyExtend cannot truncate!");
2170 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2171 return V; // No conversion
2172 return getAnyExtendExpr(V, Ty);
2173 }
2175 /// getTruncateOrNoop - Return a SCEV corresponding to a conversion of the
2176 /// input value to the specified type. The conversion must not be widening.
2177 const SCEV *
2178 ScalarEvolution::getTruncateOrNoop(const SCEV *V, const Type *Ty) {
2179 const Type *SrcTy = V->getType();
2180 assert((SrcTy->isInteger() || (TD && isa<PointerType>(SrcTy))) &&
2181 (Ty->isInteger() || (TD && isa<PointerType>(Ty))) &&
2182 "Cannot truncate or noop with non-integer arguments!");
2183 assert(getTypeSizeInBits(SrcTy) >= getTypeSizeInBits(Ty) &&
2184 "getTruncateOrNoop cannot extend!");
2185 if (getTypeSizeInBits(SrcTy) == getTypeSizeInBits(Ty))
2186 return V; // No conversion
2187 return getTruncateExpr(V, Ty);
2188 }
2190 /// getUMaxFromMismatchedTypes - Promote the operands to the wider of
2191 /// the types using zero-extension, and then perform a umax operation
2192 /// with them.
2193 const SCEV *ScalarEvolution::getUMaxFromMismatchedTypes(const SCEV *LHS,
2194 const SCEV *RHS) {
2195 const SCEV *PromotedLHS = LHS;
2196 const SCEV *PromotedRHS = RHS;
2198 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2199 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2200 else
2201 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2203 return getUMaxExpr(PromotedLHS, PromotedRHS);
2204 }
2206 /// getUMinFromMismatchedTypes - Promote the operands to the wider of
2207 /// the types using zero-extension, and then perform a umin operation
2208 /// with them.
2209 const SCEV *ScalarEvolution::getUMinFromMismatchedTypes(const SCEV *LHS,
2210 const SCEV *RHS) {
2211 const SCEV *PromotedLHS = LHS;
2212 const SCEV *PromotedRHS = RHS;
2214 if (getTypeSizeInBits(LHS->getType()) > getTypeSizeInBits(RHS->getType()))
2215 PromotedRHS = getZeroExtendExpr(RHS, LHS->getType());
2216 else
2217 PromotedLHS = getNoopOrZeroExtend(LHS, RHS->getType());
2219 return getUMinExpr(PromotedLHS, PromotedRHS);
2220 }
2222 /// ReplaceSymbolicValueWithConcrete - This looks up the computed SCEV value for
2223 /// the specified instruction and replaces any references to the symbolic value
2224 /// SymName with the specified value. This is used during PHI resolution.
2225 void
2226 ScalarEvolution::ReplaceSymbolicValueWithConcrete(Instruction *I,
2227 const SCEV *SymName,
2228 const SCEV *NewVal) {
2229 std::map<SCEVCallbackVH, const SCEV *>::iterator SI =
2230 Scalars.find(SCEVCallbackVH(I, this));
2231 if (SI == Scalars.end()) return;
2233 const SCEV *NV =
2234 SI->second->replaceSymbolicValuesWithConcrete(SymName, NewVal, *this);
2235 if (NV == SI->second) return; // No change.
2237 SI->second = NV; // Update the scalars map!
2239 // Any instruction values that use this instruction might also need to be
2240 // updated!
2241 for (Value::use_iterator UI = I->use_begin(), E = I->use_end();
2242 UI != E; ++UI)
2243 ReplaceSymbolicValueWithConcrete(cast<Instruction>(*UI), SymName, NewVal);
2244 }
2246 /// createNodeForPHI - PHI nodes have two cases. Either the PHI node exists in
2247 /// a loop header, making it a potential recurrence, or it doesn't.
2249 const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
2250 if (PN->getNumIncomingValues() == 2) // The loops have been canonicalized.
2251 if (const Loop *L = LI->getLoopFor(PN->getParent()))
2252 if (L->getHeader() == PN->getParent()) {
2253 // If it lives in the loop header, it has two incoming values, one
2254 // from outside the loop, and one from inside.
2255 unsigned IncomingEdge = L->contains(PN->getIncomingBlock(0));
2256 unsigned BackEdge = IncomingEdge^1;
2258 // While we are analyzing this PHI node, handle its value symbolically.
2259 const SCEV *SymbolicName = getUnknown(PN);
2260 assert(Scalars.find(PN) == Scalars.end() &&
2261 "PHI node already processed?");
2262 Scalars.insert(std::make_pair(SCEVCallbackVH(PN, this), SymbolicName));
2264 // Using this symbolic name for the PHI, analyze the value coming around
2265 // the loop.
2266 const SCEV *BEValue = getSCEV(PN->getIncomingValue(BackEdge));
2268 // NOTE: If BEValue is loop invariant, we know that the PHI node just
2269 // has a special value for the first iteration of the loop.
2271 // If the value coming around the backedge is an add with the symbolic
2272 // value we just inserted, then we found a simple induction variable!
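// Illustrative example (names are hypothetical): for
// "%i = phi [ 0, %preheader ], [ %i.next, %latch ]" with
// "%i.next = add %i, 1", BEValue is SymbolicName + 1, so Accum below
// becomes 1 and the PHI is resolved to the recurrence {0,+,1}.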
2273 if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(BEValue)) {
2274 // If there is a single occurrence of the symbolic value, replace it
2275 // with a recurrence.
2276 unsigned FoundIndex = Add->getNumOperands();
2277 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2278 if (Add->getOperand(i) == SymbolicName)
2279 if (FoundIndex == e) {
2280 FoundIndex = i;
2281 break;
2282 }
2284 if (FoundIndex != Add->getNumOperands()) {
2285 // Create an add with everything but the specified operand.
2286 SmallVector<const SCEV *, 8> Ops;
2287 for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
2288 if (i != FoundIndex)
2289 Ops.push_back(Add->getOperand(i));
2290 const SCEV *Accum = getAddExpr(Ops);
2292 // This is not a valid addrec if the step amount is varying each
2293 // loop iteration, but is not itself an addrec in this loop.
2294 if (Accum->isLoopInvariant(L) ||
2295 (isa<SCEVAddRecExpr>(Accum) &&
2296 cast<SCEVAddRecExpr>(Accum)->getLoop() == L)) {
2297 const SCEV *StartVal =
2298 getSCEV(PN->getIncomingValue(IncomingEdge));
2299 const SCEV *PHISCEV =
2300 getAddRecExpr(StartVal, Accum, L);
2302 // Okay, for the entire analysis of this edge we assumed the PHI
2303 // to be symbolic. We now need to go back and update all of the
2304 // entries for the scalars that use the PHI (except for the PHI
2305 // itself) to use the new analyzed value instead of the "symbolic"
2306 // value.
2307 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2308 return PHISCEV;
2309 }
2310 }
2311 } else if (const SCEVAddRecExpr *AddRec =
2312 dyn_cast<SCEVAddRecExpr>(BEValue)) {
2313 // Otherwise, this could be a loop like this:
2314 // i = 0; for (j = 1; ..; ++j) { .... i = j; }
2315 // In this case, j = {1,+,1} and BEValue is j.
2316 // Because the other in-value of i (0) fits the evolution of BEValue,
2317 // i really is an addrec evolution.
2318 if (AddRec->getLoop() == L && AddRec->isAffine()) {
2319 const SCEV *StartVal = getSCEV(PN->getIncomingValue(IncomingEdge));
2321 // If StartVal = j.start - j.stride, we can use StartVal as the
2322 // initial step of the addrec evolution.
2323 if (StartVal == getMinusSCEV(AddRec->getOperand(0),
2324 AddRec->getOperand(1))) {
2325 const SCEV *PHISCEV =
2326 getAddRecExpr(StartVal, AddRec->getOperand(1), L);
2328 // Okay, for the entire analysis of this edge we assumed the PHI
2329 // to be symbolic. We now need to go back and update all of the
2330 // entries for the scalars that use the PHI (except for the PHI
2331 // itself) to use the new analyzed value instead of the "symbolic"
2332 // value.
2333 ReplaceSymbolicValueWithConcrete(PN, SymbolicName, PHISCEV);
2334 return PHISCEV;
2335 }
2336 }
2337 }
2339 return SymbolicName;
2340 }
2342 // If it's not a loop phi, we can't handle it yet.
2343 return getUnknown(PN);
2344 }
2346 /// createNodeForGEP - Expand GEP instructions into add and multiply
2347 /// operations. This allows them to be analyzed by regular SCEV code.
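// Illustrative example (assuming 32-bit pointers): for
// "getelementptr [10 x i32]* %A, i32 0, i32 %i", the first index adds
// 0 * 40 bytes and the second adds (sext %i) * 4 bytes, so the result is
// getSCEV(%A) plus a sum of explicitly scaled index expressions.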
2349 const SCEV *ScalarEvolution::createNodeForGEP(User *GEP) {
2351 const Type *IntPtrTy = TD->getIntPtrType();
2352 Value *Base = GEP->getOperand(0);
2353 // Don't attempt to analyze GEPs over unsized objects.
2354 if (!cast<PointerType>(Base->getType())->getElementType()->isSized())
2355 return getUnknown(GEP);
2356 const SCEV *TotalOffset = getIntegerSCEV(0, IntPtrTy);
2357 gep_type_iterator GTI = gep_type_begin(GEP);
2358 for (GetElementPtrInst::op_iterator I = next(GEP->op_begin()),
2359 E = GEP->op_end();
2360 I != E; ++I) {
2361 Value *Index = *I;
2362 // Compute the (potentially symbolic) offset in bytes for this index.
2363 if (const StructType *STy = dyn_cast<StructType>(*GTI++)) {
2364 // For a struct, add the member offset.
2365 const StructLayout &SL = *TD->getStructLayout(STy);
2366 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
2367 uint64_t Offset = SL.getElementOffset(FieldNo);
2368 TotalOffset = getAddExpr(TotalOffset,
2369 getIntegerSCEV(Offset, IntPtrTy));
2370 } else {
2371 // For an array, add the element offset, explicitly scaled.
2372 const SCEV *LocalOffset = getSCEV(Index);
2373 if (!isa<PointerType>(LocalOffset->getType()))
2374 // Getelementptr indices are signed.
2375 LocalOffset = getTruncateOrSignExtend(LocalOffset,
2376 IntPtrTy);
2377 LocalOffset =
2378 getMulExpr(LocalOffset,
2379 getIntegerSCEV(TD->getTypeAllocSize(*GTI),
2380 IntPtrTy));
2381 TotalOffset = getAddExpr(TotalOffset, LocalOffset);
2382 }
2383 }
2384 return getAddExpr(getSCEV(Base), TotalOffset);
2385 }
2387 /// GetMinTrailingZeros - Determine the minimum number of zero bits that S is
2388 /// guaranteed to end in (at every loop iteration). It is, at the same time,
2389 /// the minimum number of times S is divisible by 2. For example, given {4,+,8}
2390 /// it returns 2. If S is guaranteed to be 0, it returns the bitwidth of S.
2391 uint32_t
2392 ScalarEvolution::GetMinTrailingZeros(const SCEV *S) {
2393 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2394 return C->getValue()->getValue().countTrailingZeros();
2396 if (const SCEVTruncateExpr *T = dyn_cast<SCEVTruncateExpr>(S))
2397 return std::min(GetMinTrailingZeros(T->getOperand()),
2398 (uint32_t)getTypeSizeInBits(T->getType()));
2400 if (const SCEVZeroExtendExpr *E = dyn_cast<SCEVZeroExtendExpr>(S)) {
2401 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2402 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2403 getTypeSizeInBits(E->getType()) : OpRes;
2406 if (const SCEVSignExtendExpr *E = dyn_cast<SCEVSignExtendExpr>(S)) {
2407 uint32_t OpRes = GetMinTrailingZeros(E->getOperand());
2408 return OpRes == getTypeSizeInBits(E->getOperand()->getType()) ?
2409 getTypeSizeInBits(E->getType()) : OpRes;
2412 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2413 // The result is the min of all operands results.
2414 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2415 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2416 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2417 return MinOpRes;
2418 }
2420 if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S)) {
2421 // The result is the sum of all operands results.
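// For example, a factor divisible by 2^2 times a factor divisible by 2^3
// gives a product divisible by 2^5, so the per-operand counts are summed,
// capped at the bitwidth below.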
2422 uint32_t SumOpRes = GetMinTrailingZeros(M->getOperand(0));
2423 uint32_t BitWidth = getTypeSizeInBits(M->getType());
2424 for (unsigned i = 1, e = M->getNumOperands();
2425 SumOpRes != BitWidth && i != e; ++i)
2426 SumOpRes = std::min(SumOpRes + GetMinTrailingZeros(M->getOperand(i)),
2427 BitWidth);
2428 return SumOpRes;
2429 }
2431 if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(S)) {
2432 // The result is the min of all operands results.
2433 uint32_t MinOpRes = GetMinTrailingZeros(A->getOperand(0));
2434 for (unsigned i = 1, e = A->getNumOperands(); MinOpRes && i != e; ++i)
2435 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(A->getOperand(i)));
2436 return MinOpRes;
2437 }
2439 if (const SCEVSMaxExpr *M = dyn_cast<SCEVSMaxExpr>(S)) {
2440 // The result is the min of all operands results.
2441 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2442 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2443 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2444 return MinOpRes;
2445 }
2447 if (const SCEVUMaxExpr *M = dyn_cast<SCEVUMaxExpr>(S)) {
2448 // The result is the min of all operands results.
2449 uint32_t MinOpRes = GetMinTrailingZeros(M->getOperand(0));
2450 for (unsigned i = 1, e = M->getNumOperands(); MinOpRes && i != e; ++i)
2451 MinOpRes = std::min(MinOpRes, GetMinTrailingZeros(M->getOperand(i)));
2452 return MinOpRes;
2453 }
2455 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2456 // For a SCEVUnknown, ask ValueTracking.
2457 unsigned BitWidth = getTypeSizeInBits(U->getType());
2458 APInt Mask = APInt::getAllOnesValue(BitWidth);
2459 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2460 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2461 return Zeros.countTrailingOnes();
2462 }
2464 return 0;
2465 }
2468 uint32_t
2469 ScalarEvolution::GetMinLeadingZeros(const SCEV *S) {
2470 // TODO: Handle other SCEV expression types here.
2472 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S))
2473 return C->getValue()->getValue().countLeadingZeros();
2475 if (const SCEVZeroExtendExpr *C = dyn_cast<SCEVZeroExtendExpr>(S)) {
2476 // A zero-extension cast adds zero bits.
2477 return GetMinLeadingZeros(C->getOperand()) +
2478 (getTypeSizeInBits(C->getType()) -
2479 getTypeSizeInBits(C->getOperand()->getType()));
2482 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2483 // For a SCEVUnknown, ask ValueTracking.
2484 unsigned BitWidth = getTypeSizeInBits(U->getType());
2485 APInt Mask = APInt::getAllOnesValue(BitWidth);
2486 APInt Zeros(BitWidth, 0), Ones(BitWidth, 0);
2487 ComputeMaskedBits(U->getValue(), Mask, Zeros, Ones, TD);
2488 return Zeros.countLeadingOnes();
2489 }
2491 return 0;
2492 }
2494 uint32_t
2495 ScalarEvolution::GetMinSignBits(const SCEV *S) {
2496 // TODO: Handle other SCEV expression types here.
2498 if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
2499 const APInt &A = C->getValue()->getValue();
2500 return A.isNegative() ? A.countLeadingOnes() :
2501 A.countLeadingZeros();
2502 }
2504 if (const SCEVSignExtendExpr *C = dyn_cast<SCEVSignExtendExpr>(S)) {
2505 // A sign-extension cast adds sign bits.
2506 return GetMinSignBits(C->getOperand()) +
2507 (getTypeSizeInBits(C->getType()) -
2508 getTypeSizeInBits(C->getOperand()->getType()));
2509 }
2511 if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(S)) {
2512 unsigned BitWidth = getTypeSizeInBits(A->getType());
2514 // Special case decrementing a value (ADD X, -1):
2515 if (const SCEVConstant *CRHS = dyn_cast<SCEVConstant>(A->getOperand(0)))
2516 if (CRHS->isAllOnesValue()) {
2517 SmallVector<const SCEV *, 4> OtherOps(A->op_begin() + 1, A->op_end());
2518 const SCEV *OtherOpsAdd = getAddExpr(OtherOps);
2519 unsigned LZ = GetMinLeadingZeros(OtherOpsAdd);
2521 // If the input is known to be 0 or 1, the output is 0/-1, which is all
2522 // sign bits.
2523 if (LZ == BitWidth - 1)
2524 return BitWidth;
2526 // If we are subtracting one from a positive number, there is no carry
2527 // out of the result.
2528 if (LZ > 0)
2529 return GetMinSignBits(OtherOpsAdd);
2530 }
2532 // Add can have at most one carry bit. Thus we know that the output
2533 // is, at worst, one more bit than the inputs.
2534 unsigned Min = BitWidth;
2535 for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i) {
2536 unsigned N = GetMinSignBits(A->getOperand(i));
2537 Min = std::min(Min, N) - 1;
2538 if (Min == 0) return 1;
2539 }
2540 return 1 + Min;
2541 }
2543 if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
2544 // For a SCEVUnknown, ask ValueTracking.
2545 return ComputeNumSignBits(U->getValue(), TD);
2546 }
2548 return 1;
2549 }
2551 /// createSCEV - We know that there is no SCEV for the specified value.
2552 /// Analyze the expression.
2554 const SCEV *ScalarEvolution::createSCEV(Value *V) {
2555 if (!isSCEVable(V->getType()))
2556 return getUnknown(V);
2558 unsigned Opcode = Instruction::UserOp1;
2559 if (Instruction *I = dyn_cast<Instruction>(V))
2560 Opcode = I->getOpcode();
2561 else if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
2562 Opcode = CE->getOpcode();
2563 else if (ConstantInt *CI = dyn_cast<ConstantInt>(V))
2564 return getConstant(CI);
2565 else if (isa<ConstantPointerNull>(V))
2566 return getIntegerSCEV(0, V->getType());
2567 else if (isa<UndefValue>(V))
2568 return getIntegerSCEV(0, V->getType());
2569 else
2570 return getUnknown(V);
2572 User *U = cast<User>(V);
2573 switch (Opcode) {
2574 case Instruction::Add:
2575 return getAddExpr(getSCEV(U->getOperand(0)),
2576 getSCEV(U->getOperand(1)));
2577 case Instruction::Mul:
2578 return getMulExpr(getSCEV(U->getOperand(0)),
2579 getSCEV(U->getOperand(1)));
2580 case Instruction::UDiv:
2581 return getUDivExpr(getSCEV(U->getOperand(0)),
2582 getSCEV(U->getOperand(1)));
2583 case Instruction::Sub:
2584 return getMinusSCEV(getSCEV(U->getOperand(0)),
2585 getSCEV(U->getOperand(1)));
2586 case Instruction::And:
2587 // For an expression like x&255 that merely masks off the high bits,
2588 // use zext(trunc(x)) as the SCEV expression.
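// For example, "and i32 %x, 255" is modeled as
// zext(trunc(%x to i8) to i32), exposing it to the usual cast foldings.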
2589 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2590 if (CI->isNullValue())
2591 return getSCEV(U->getOperand(1));
2592 if (CI->isAllOnesValue())
2593 return getSCEV(U->getOperand(0));
2594 const APInt &A = CI->getValue();
2596 // Instcombine's ShrinkDemandedConstant may strip bits out of
2597 // constants, obscuring what would otherwise be a low-bits mask.
2598 // Use ComputeMaskedBits to compute what ShrinkDemandedConstant
2599 // knew about to reconstruct a low-bits mask value.
2600 unsigned LZ = A.countLeadingZeros();
2601 unsigned BitWidth = A.getBitWidth();
2602 APInt AllOnes = APInt::getAllOnesValue(BitWidth);
2603 APInt KnownZero(BitWidth, 0), KnownOne(BitWidth, 0);
2604 ComputeMaskedBits(U->getOperand(0), AllOnes, KnownZero, KnownOne, TD);
2606 APInt EffectiveMask = APInt::getLowBitsSet(BitWidth, BitWidth - LZ);
2608 if (LZ != 0 && !((~A & ~KnownZero) & EffectiveMask))
2609 return
2610 getZeroExtendExpr(getTruncateExpr(getSCEV(U->getOperand(0)),
2611 IntegerType::get(BitWidth - LZ)),
2612 U->getType());
2613 }
2614 break;
2616 case Instruction::Or:
2617 // If the RHS of the Or is a constant, we may have something like:
2618 // X*4+1 which got turned into X*4|1. Handle this as an Add so loop
2619 // optimizations will transparently handle this case.
2621 // In order for this transformation to be safe, the LHS must be of the
2622 // form X*(2^n) and the Or constant must be less than 2^n.
2623 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2624 const SCEV *LHS = getSCEV(U->getOperand(0));
2625 const APInt &CIVal = CI->getValue();
2626 if (GetMinTrailingZeros(LHS) >=
2627 (CIVal.getBitWidth() - CIVal.countLeadingZeros()))
2628 return getAddExpr(LHS, getSCEV(U->getOperand(1)));
2629 }
2630 break;
2631 case Instruction::Xor:
2632 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1))) {
2633 // If the RHS of the xor is a signbit, then this is just an add.
2634 // Instcombine turns add of signbit into xor as a strength reduction step.
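// For example, "xor i32 %x, 0x80000000" flips only the sign bit, and
// flipping the top bit equals adding 0x80000000 modulo 2^32, so the xor
// is modeled as an add.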
2635 if (CI->getValue().isSignBit())
2636 return getAddExpr(getSCEV(U->getOperand(0)),
2637 getSCEV(U->getOperand(1)));
2639 // If the RHS of xor is -1, then this is a not operation.
2640 if (CI->isAllOnesValue())
2641 return getNotSCEV(getSCEV(U->getOperand(0)));
2643 // Model xor(and(x, C), C) as and(~x, C), if C is a low-bits mask.
2644 // This is a variant of the check for xor with -1, and it handles
2645 // the case where instcombine has trimmed non-demanded bits out
2646 // of an xor with -1.
2647 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(U->getOperand(0)))
2648 if (ConstantInt *LCI = dyn_cast<ConstantInt>(BO->getOperand(1)))
2649 if (BO->getOpcode() == Instruction::And &&
2650 LCI->getValue() == CI->getValue())
2651 if (const SCEVZeroExtendExpr *Z =
2652 dyn_cast<SCEVZeroExtendExpr>(getSCEV(U->getOperand(0)))) {
2653 const Type *UTy = U->getType();
2654 const SCEV *Z0 = Z->getOperand();
2655 const Type *Z0Ty = Z0->getType();
2656 unsigned Z0TySize = getTypeSizeInBits(Z0Ty);
2658 // If C is a low-bits mask, the zero extend is serving to
2659 // mask off the high bits. Complement the operand and
2660 // re-apply the zext.
2661 if (APIntOps::isMask(Z0TySize, CI->getValue()))
2662 return getZeroExtendExpr(getNotSCEV(Z0), UTy);
2664 // If C is a single bit, it may be in the sign-bit position
2665 // before the zero-extend. In this case, represent the xor
2666 // using an add, which is equivalent, and re-apply the zext.
2667 APInt Trunc = APInt(CI->getValue()).trunc(Z0TySize);
2668 if (APInt(Trunc).zext(getTypeSizeInBits(UTy)) == CI->getValue() &&
2669 Trunc.isSignBit())
2670 return getZeroExtendExpr(getAddExpr(Z0, getConstant(Trunc)),
2671 UTy);
2672 }
2673 }
2674 break;
2676 case Instruction::Shl:
2677 // Turn shift left of a constant amount into a multiply.
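// For example, "shl i32 %x, 3" is modeled as %x * 8, i.e. a multiply by
// the constant 1 << 3.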
2678 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2679 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2680 Constant *X = ConstantInt::get(
2681 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2682 return getMulExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2683 }
2684 break;
2686 case Instruction::LShr:
2687 // Turn logical shift right of a constant into an unsigned divide.
2688 if (ConstantInt *SA = dyn_cast<ConstantInt>(U->getOperand(1))) {
2689 uint32_t BitWidth = cast<IntegerType>(V->getType())->getBitWidth();
2690 Constant *X = ConstantInt::get(
2691 APInt(BitWidth, 1).shl(SA->getLimitedValue(BitWidth)));
2692 return getUDivExpr(getSCEV(U->getOperand(0)), getSCEV(X));
2693 }
2694 break;
2696 case Instruction::AShr:
2697 // For a two-shift sext-inreg, use sext(trunc(x)) as the SCEV expression.
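// For example, "ashr i32 (shl i32 %x, 24), 24" is the classic
// sign-extend-in-register idiom and is modeled as
// sext(trunc(%x to i8) to i32).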
2698 if (ConstantInt *CI = dyn_cast<ConstantInt>(U->getOperand(1)))
2699 if (Instruction *L = dyn_cast<Instruction>(U->getOperand(0)))
2700 if (L->getOpcode() == Instruction::Shl &&
2701 L->getOperand(1) == U->getOperand(1)) {
2702 unsigned BitWidth = getTypeSizeInBits(U->getType());
2703 uint64_t Amt = BitWidth - CI->getZExtValue();
2704 if (Amt == BitWidth)
2705 return getSCEV(L->getOperand(0)); // shift by zero --> noop
2706 if (Amt > BitWidth)
2707 return getIntegerSCEV(0, U->getType()); // value is undefined
2708 return
2709 getSignExtendExpr(getTruncateExpr(getSCEV(L->getOperand(0)),
2710 IntegerType::get(Amt)),
2711 U->getType());
2712 }
2713 break;
2715 case Instruction::Trunc:
2716 return getTruncateExpr(getSCEV(U->getOperand(0)), U->getType());
2718 case Instruction::ZExt:
2719 return getZeroExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2721 case Instruction::SExt:
2722 return getSignExtendExpr(getSCEV(U->getOperand(0)), U->getType());
2724 case Instruction::BitCast:
2725 // BitCasts are no-op casts so we just eliminate the cast.
2726 if (isSCEVable(U->getType()) && isSCEVable(U->getOperand(0)->getType()))
2727 return getSCEV(U->getOperand(0));
2728 break;
2730 case Instruction::IntToPtr:
2731 if (!TD) break; // Without TD we can't analyze pointers.
2732 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2733 TD->getIntPtrType());
2735 case Instruction::PtrToInt:
2736 if (!TD) break; // Without TD we can't analyze pointers.
2737 return getTruncateOrZeroExtend(getSCEV(U->getOperand(0)),
2738 U->getType());
2740 case Instruction::GetElementPtr:
2741 if (!TD) break; // Without TD we can't analyze pointers.
2742 return createNodeForGEP(U);
2744 case Instruction::PHI:
2745 return createNodeForPHI(cast<PHINode>(U));
2747 case Instruction::Select:
2748 // This could be a smax or umax that was lowered earlier.
2749 // Try to recover it.
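// For example, "select (icmp sgt %x, %y), %x, %y" is recognized below as
// smax(%x, %y), and the operand-swapped form as smin(%x, %y).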
2750 if (ICmpInst *ICI = dyn_cast<ICmpInst>(U->getOperand(0))) {
2751 Value *LHS = ICI->getOperand(0);
2752 Value *RHS = ICI->getOperand(1);
2753 switch (ICI->getPredicate()) {
2754 case ICmpInst::ICMP_SLT:
2755 case ICmpInst::ICMP_SLE:
2756 std::swap(LHS, RHS);
2757 // fall through
2758 case ICmpInst::ICMP_SGT:
2759 case ICmpInst::ICMP_SGE:
2760 if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2761 return getSMaxExpr(getSCEV(LHS), getSCEV(RHS));
2762 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2763 return getSMinExpr(getSCEV(LHS), getSCEV(RHS));
2764 break;
2765 case ICmpInst::ICMP_ULT:
2766 case ICmpInst::ICMP_ULE:
2767 std::swap(LHS, RHS);
2768 // fall through
2769 case ICmpInst::ICMP_UGT:
2770 case ICmpInst::ICMP_UGE:
2771 if (LHS == U->getOperand(1) && RHS == U->getOperand(2))
2772 return getUMaxExpr(getSCEV(LHS), getSCEV(RHS));
2773 else if (LHS == U->getOperand(2) && RHS == U->getOperand(1))
2774 return getUMinExpr(getSCEV(LHS), getSCEV(RHS));
2775 break;
2776 case ICmpInst::ICMP_NE:
2777 // n != 0 ? n : 1 -> umax(n, 1)
2778 if (LHS == U->getOperand(1) &&
2779 isa<ConstantInt>(U->getOperand(2)) &&
2780 cast<ConstantInt>(U->getOperand(2))->isOne() &&
2781 isa<ConstantInt>(RHS) &&
2782 cast<ConstantInt>(RHS)->isZero())
2783 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(2)));
2784 break;
2785 case ICmpInst::ICMP_EQ:
2786 // n == 0 ? 1 : n -> umax(n, 1)
2787 if (LHS == U->getOperand(2) &&
2788 isa<ConstantInt>(U->getOperand(1)) &&
2789 cast<ConstantInt>(U->getOperand(1))->isOne() &&
2790 isa<ConstantInt>(RHS) &&
2791 cast<ConstantInt>(RHS)->isZero())
2792 return getUMaxExpr(getSCEV(LHS), getSCEV(U->getOperand(1)));
2793 break;
2794 default:
2795 break;
2796 }
2797 }
2798 break;
2799 default: // We cannot analyze this expression.
2800 break;
2801 }
2803 return getUnknown(V);
2804 }
2808 //===----------------------------------------------------------------------===//
2809 // Iteration Count Computation Code
2810 //===----------------------------------------------------------------------===//
2812 /// getBackedgeTakenCount - If the specified loop has a predictable
2813 /// backedge-taken count, return it, otherwise return a SCEVCouldNotCompute
2814 /// object. The backedge-taken count is the number of times the loop header
2815 /// will be branched to from within the loop. This is one less than the
2816 /// trip count of the loop, since it doesn't count the first iteration,
2817 /// when the header is branched to from outside the loop.
2819 /// Note that it is not valid to call this method on a loop without a
2820 /// loop-invariant backedge-taken count (see
2821 /// hasLoopInvariantBackedgeTakenCount).
2823 const SCEV *ScalarEvolution::getBackedgeTakenCount(const Loop *L) {
2824 return getBackedgeTakenInfo(L).Exact;
2825 }
2827 /// getMaxBackedgeTakenCount - Similar to getBackedgeTakenCount, except
2828 /// return the least SCEV value that is known never to be less than the
2829 /// actual backedge taken count.
2830 const SCEV *ScalarEvolution::getMaxBackedgeTakenCount(const Loop *L) {
2831 return getBackedgeTakenInfo(L).Max;
2832 }
2834 /// PushLoopPHIs - Push PHI nodes in the header of the given loop
2835 /// onto the given Worklist.
2836 static void
2837 PushLoopPHIs(const Loop *L, SmallVectorImpl<Instruction *> &Worklist) {
2838 BasicBlock *Header = L->getHeader();
2840 // Push all Loop-header PHIs onto the Worklist stack.
2841 for (BasicBlock::iterator I = Header->begin();
2842 PHINode *PN = dyn_cast<PHINode>(I); ++I)
2843 Worklist.push_back(PN);
2844 }
2846 /// PushDefUseChildren - Push users of the given Instruction
2847 /// onto the given Worklist.
2848 static void
2849 PushDefUseChildren(Instruction *I,
2850 SmallVectorImpl<Instruction *> &Worklist) {
2851 // Push the def-use children onto the Worklist stack.
2852 for (Value::use_iterator UI = I->use_begin(), UE = I->use_end();
2853 UI != UE; ++UI)
2854 Worklist.push_back(cast<Instruction>(UI));
2855 }
2857 const ScalarEvolution::BackedgeTakenInfo &
2858 ScalarEvolution::getBackedgeTakenInfo(const Loop *L) {
2859 // Initially insert a CouldNotCompute for this loop. If the insertion
2860 // succeeds, proceed to actually compute a backedge-taken count and
2861 // update the value. The temporary CouldNotCompute value tells SCEV
2862 // code elsewhere that it shouldn't attempt to request a new
2863 // backedge-taken count, which could result in infinite recursion.
2864 std::pair<std::map<const Loop*, BackedgeTakenInfo>::iterator, bool> Pair =
2865 BackedgeTakenCounts.insert(std::make_pair(L, getCouldNotCompute()));
2866 if (Pair.second) {
2867 BackedgeTakenInfo ItCount = ComputeBackedgeTakenCount(L);
2868 if (ItCount.Exact != getCouldNotCompute()) {
2869 assert(ItCount.Exact->isLoopInvariant(L) &&
2870 ItCount.Max->isLoopInvariant(L) &&
2871 "Computed trip count isn't loop invariant for loop!");
2872 ++NumTripCountsComputed;
2874 // Update the value in the map.
2875 Pair.first->second = ItCount;
2876 } else {
2877 if (ItCount.Max != getCouldNotCompute())
2878 // Update the value in the map.
2879 Pair.first->second = ItCount;
2880 if (isa<PHINode>(L->getHeader()->begin()))
2881 // Only count loops that have phi nodes as not being computable.
2882 ++NumTripCountsNotComputed;
2883 }
2885 // Now that we know more about the trip count for this loop, forget any
2886 // existing SCEV values for PHI nodes in this loop since they are only
2887 // conservative estimates made without the benefit of trip count
2888 // information. This is similar to the code in
2889 // forgetLoopBackedgeTakenCount, except that it handles SCEVUnknown PHI
2890 // nodes specially.
2891 if (ItCount.hasAnyInfo()) {
2892 SmallVector<Instruction *, 16> Worklist;
2893 PushLoopPHIs(L, Worklist);
2895 SmallPtrSet<Instruction *, 8> Visited;
2896 while (!Worklist.empty()) {
2897 Instruction *I = Worklist.pop_back_val();
2898 if (!Visited.insert(I)) continue;
2900 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2901 Scalars.find(static_cast<Value *>(I));
2902 if (It != Scalars.end()) {
2903 // SCEVUnknown for a PHI either means that it has an unrecognized
2904 // structure, or it's a PHI that's in the process of being computed
2905 // by createNodeForPHI. In the former case, additional loop trip count
2906 // information isn't going to change anything. In the latter case,
2907 // createNodeForPHI will perform the necessary updates on its own when
2908 // it gets to that point.
2909 if (!isa<PHINode>(I) || !isa<SCEVUnknown>(It->second))
2910 Scalars.erase(It);
2911 ValuesAtScopes.erase(I);
2912 if (PHINode *PN = dyn_cast<PHINode>(I))
2913 ConstantEvolutionLoopExitValue.erase(PN);
2914 }
2916 PushDefUseChildren(I, Worklist);
2917 }
2918 }
2919 }
2920 return Pair.first->second;
2921 }
2923 /// forgetLoopBackedgeTakenCount - This method should be called by the
2924 /// client when it has changed a loop in a way that may affect
2925 /// ScalarEvolution's ability to compute a trip count, or if the loop
2926 /// is deleted.
2927 void ScalarEvolution::forgetLoopBackedgeTakenCount(const Loop *L) {
2928 BackedgeTakenCounts.erase(L);
2930 SmallVector<Instruction *, 16> Worklist;
2931 PushLoopPHIs(L, Worklist);
2933 SmallPtrSet<Instruction *, 8> Visited;
2934 while (!Worklist.empty()) {
2935 Instruction *I = Worklist.pop_back_val();
2936 if (!Visited.insert(I)) continue;
2938 std::map<SCEVCallbackVH, const SCEV*>::iterator It =
2939 Scalars.find(static_cast<Value *>(I));
2940 if (It != Scalars.end()) {
2941 Scalars.erase(It);
2942 ValuesAtScopes.erase(I);
2943 if (PHINode *PN = dyn_cast<PHINode>(I))
2944 ConstantEvolutionLoopExitValue.erase(PN);
2945 }
2947 PushDefUseChildren(I, Worklist);
2948 }
2949 }
2951 /// ComputeBackedgeTakenCount - Compute the number of times the backedge
2952 /// of the specified loop will execute.
2953 ScalarEvolution::BackedgeTakenInfo
2954 ScalarEvolution::ComputeBackedgeTakenCount(const Loop *L) {
2955 SmallVector<BasicBlock*, 8> ExitingBlocks;
2956 L->getExitingBlocks(ExitingBlocks);
2958 // Examine all exits and pick the most conservative values.
2959 const SCEV *BECount = getCouldNotCompute();
2960 const SCEV *MaxBECount = getCouldNotCompute();
2961 bool CouldNotComputeBECount = false;
2962 for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
2963 BackedgeTakenInfo NewBTI =
2964 ComputeBackedgeTakenCountFromExit(L, ExitingBlocks[i]);
2966 if (NewBTI.Exact == getCouldNotCompute()) {
2967 // We couldn't compute an exact value for this exit, so
2968 // we won't be able to compute an exact value for the loop.
2969 CouldNotComputeBECount = true;
2970 BECount = getCouldNotCompute();
2971 } else if (!CouldNotComputeBECount) {
2972 if (BECount == getCouldNotCompute())
2973 BECount = NewBTI.Exact;
2974 else
2975 BECount = getUMinFromMismatchedTypes(BECount, NewBTI.Exact);
2976 }
2977 if (MaxBECount == getCouldNotCompute())
2978 MaxBECount = NewBTI.Max;
2979 else if (NewBTI.Max != getCouldNotCompute())
2980 MaxBECount = getUMinFromMismatchedTypes(MaxBECount, NewBTI.Max);
2981 }
2983 return BackedgeTakenInfo(BECount, MaxBECount);
2984 }
2986 /// ComputeBackedgeTakenCountFromExit - Compute the number of times the backedge
2987 /// of the specified loop will execute if it exits via the specified block.
2988 ScalarEvolution::BackedgeTakenInfo
2989 ScalarEvolution::ComputeBackedgeTakenCountFromExit(const Loop *L,
2990 BasicBlock *ExitingBlock) {
2992 // Okay, we've chosen an exiting block. See what condition causes us to
2993 // exit at this block.
2995 // FIXME: we should be able to handle switch instructions (with a single exit)
2996 BranchInst *ExitBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
2997 if (ExitBr == 0) return getCouldNotCompute();
2998 assert(ExitBr->isConditional() && "If unconditional, it can't be in loop!");
3000 // At this point, we know we have a conditional branch that determines whether
3001 // the loop is exited. However, we don't know if the branch is executed each
3002 // time through the loop. If not, then the execution count of the branch will
3003 // not be equal to the trip count of the loop.
3005 // Currently we check for this by checking to see if the Exit branch goes to
3006 // the loop header. If so, we know it will always execute the same number of
3007 // times as the loop. We also handle the case where the exit block *is* the
3008 // loop header. This is common for un-rotated loops.
3010 // If both of those tests fail, walk up the unique predecessor chain to the
3011 // header, stopping if there is an edge that doesn't exit the loop. If the
3012 // header is reached, the execution count of the branch will be equal to the
3013 // trip count of the loop.
3015 // More extensive analysis could be done to handle more cases here.
3017 if (ExitBr->getSuccessor(0) != L->getHeader() &&
3018 ExitBr->getSuccessor(1) != L->getHeader() &&
3019 ExitBr->getParent() != L->getHeader()) {
3020 // The simple checks failed, try climbing the unique predecessor chain
3021 // up to the header.
3022 bool Ok = false;
3023 for (BasicBlock *BB = ExitBr->getParent(); BB; ) {
3024 BasicBlock *Pred = BB->getUniquePredecessor();
3025 if (!Pred)
3026 return getCouldNotCompute();
3027 TerminatorInst *PredTerm = Pred->getTerminator();
3028 for (unsigned i = 0, e = PredTerm->getNumSuccessors(); i != e; ++i) {
3029 BasicBlock *PredSucc = PredTerm->getSuccessor(i);
3030 if (PredSucc == BB)
3031 continue;
3032 // If the predecessor has a successor that isn't BB and isn't
3033 // outside the loop, assume the worst.
3034 if (L->contains(PredSucc))
3035 return getCouldNotCompute();
3036 }
3037 if (Pred == L->getHeader()) {
3038 Ok = true;
3039 break;
3040 }
3041 BB = Pred;
3042 }
3043 if (!Ok)
3044 return getCouldNotCompute();
3045 }
3047 // Proceed to the next level to examine the exit condition expression.
3048 return ComputeBackedgeTakenCountFromExitCond(L, ExitBr->getCondition(),
3049 ExitBr->getSuccessor(0),
3050 ExitBr->getSuccessor(1));
3051 }
3053 /// ComputeBackedgeTakenCountFromExitCond - Compute the number of times the
3054 /// backedge of the specified loop will execute if its exit condition
3055 /// were a conditional branch of ExitCond, TBB, and FBB.
3056 ScalarEvolution::BackedgeTakenInfo
3057 ScalarEvolution::ComputeBackedgeTakenCountFromExitCond(const Loop *L,
3058 Value *ExitCond,
3059 BasicBlock *TBB,
3060 BasicBlock *FBB) {
3061 // Check if the controlling expression for this loop is an And or Or.
3062 if (BinaryOperator *BO = dyn_cast<BinaryOperator>(ExitCond)) {
3063 if (BO->getOpcode() == Instruction::And) {
3064 // Recurse on the operands of the and.
3065 BackedgeTakenInfo BTI0 =
3066 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3067 BackedgeTakenInfo BTI1 =
3068 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3069 const SCEV *BECount = getCouldNotCompute();
3070 const SCEV *MaxBECount = getCouldNotCompute();
3071 if (L->contains(TBB)) {
3072 // Both conditions must be true for the loop to continue executing.
3073 // Choose the less conservative count.
3074 if (BTI0.Exact == getCouldNotCompute() ||
3075 BTI1.Exact == getCouldNotCompute())
3076 BECount = getCouldNotCompute();
3077 else
3078 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3079 if (BTI0.Max == getCouldNotCompute())
3080 MaxBECount = BTI1.Max;
3081 else if (BTI1.Max == getCouldNotCompute())
3082 MaxBECount = BTI0.Max;
3083 else
3084 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3085 } else {
3086 // Both conditions must be true for the loop to exit.
3087 assert(L->contains(FBB) && "Loop block has no successor in loop!");
3088 if (BTI0.Exact != getCouldNotCompute() &&
3089 BTI1.Exact != getCouldNotCompute())
3090 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3091 if (BTI0.Max != getCouldNotCompute() &&
3092 BTI1.Max != getCouldNotCompute())
3093 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3094 }
3096 return BackedgeTakenInfo(BECount, MaxBECount);
3097 }
3098 if (BO->getOpcode() == Instruction::Or) {
3099 // Recurse on the operands of the or.
3100 BackedgeTakenInfo BTI0 =
3101 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(0), TBB, FBB);
3102 BackedgeTakenInfo BTI1 =
3103 ComputeBackedgeTakenCountFromExitCond(L, BO->getOperand(1), TBB, FBB);
3104 const SCEV *BECount = getCouldNotCompute();
3105 const SCEV *MaxBECount = getCouldNotCompute();
3106 if (L->contains(FBB)) {
3107 // Both conditions must be false for the loop to continue executing.
3108 // Choose the less conservative count.
3109 if (BTI0.Exact == getCouldNotCompute() ||
3110 BTI1.Exact == getCouldNotCompute())
3111 BECount = getCouldNotCompute();
3112 else
3113 BECount = getUMinFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3114 if (BTI0.Max == getCouldNotCompute())
3115 MaxBECount = BTI1.Max;
3116 else if (BTI1.Max == getCouldNotCompute())
3117 MaxBECount = BTI0.Max;
3118 else
3119 MaxBECount = getUMinFromMismatchedTypes(BTI0.Max, BTI1.Max);
3120 } else {
3121 // Both conditions must be false for the loop to exit.
3122 assert(L->contains(TBB) && "Loop block has no successor in loop!");
3123 if (BTI0.Exact != getCouldNotCompute() &&
3124 BTI1.Exact != getCouldNotCompute())
3125 BECount = getUMaxFromMismatchedTypes(BTI0.Exact, BTI1.Exact);
3126 if (BTI0.Max != getCouldNotCompute() &&
3127 BTI1.Max != getCouldNotCompute())
3128 MaxBECount = getUMaxFromMismatchedTypes(BTI0.Max, BTI1.Max);
3129 }
3131 return BackedgeTakenInfo(BECount, MaxBECount);
3132 }
3133 }
  // With an icmp, it may be feasible to compute an exact backedge-taken count.
  // Proceed to the next level to examine the icmp.
  if (ICmpInst *ExitCondICmp = dyn_cast<ICmpInst>(ExitCond))
    return ComputeBackedgeTakenCountFromExitCondICmp(L, ExitCondICmp, TBB, FBB);

  // If it's not an integer or pointer comparison then compute it the hard way.
  return ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}

/// ComputeBackedgeTakenCountFromExitCondICmp - Compute the number of times the
/// backedge of the specified loop will execute if its exit condition
/// were a conditional branch of the ICmpInst ExitCond, TBB, and FBB.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::ComputeBackedgeTakenCountFromExitCondICmp(const Loop *L,
                                                           ICmpInst *ExitCond,
                                                           BasicBlock *TBB,
                                                           BasicBlock *FBB) {
  // If the condition was exit on true, convert the condition to exit on false.
  ICmpInst::Predicate Cond;
  if (!L->contains(FBB))
    Cond = ExitCond->getPredicate();
  else
    Cond = ExitCond->getInversePredicate();

  // Handle common loops like: for (X = "string"; *X; ++X)
  if (LoadInst *LI = dyn_cast<LoadInst>(ExitCond->getOperand(0)))
    if (Constant *RHS = dyn_cast<Constant>(ExitCond->getOperand(1))) {
      const SCEV *ItCnt =
        ComputeLoadConstantCompareBackedgeTakenCount(LI, RHS, L, Cond);
      if (!isa<SCEVCouldNotCompute>(ItCnt)) {
        unsigned BitWidth = getTypeSizeInBits(ItCnt->getType());
        return BackedgeTakenInfo(ItCnt,
                                 isa<SCEVConstant>(ItCnt) ? ItCnt :
                                   getConstant(APInt::getMaxValue(BitWidth)-1));
      }
    }

  const SCEV *LHS = getSCEV(ExitCond->getOperand(0));
  const SCEV *RHS = getSCEV(ExitCond->getOperand(1));

  // Try to evaluate any dependencies out of the loop.
  LHS = getSCEVAtScope(LHS, L);
  RHS = getSCEVAtScope(RHS, L);

  // At this point, we would like to compute how many iterations of the
  // loop the predicate will return true for these inputs.
  if (LHS->isLoopInvariant(L) && !RHS->isLoopInvariant(L)) {
    // If there is a loop-invariant operand, force it into the RHS.
    std::swap(LHS, RHS);
    Cond = ICmpInst::getSwappedPredicate(Cond);
  }

  // If we have a comparison of a chrec against a constant, try to use value
  // ranges to answer this query.
  if (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(RHS))
    if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS))
      if (AddRec->getLoop() == L) {
        // Form the constant range.
        ConstantRange CompRange(
            ICmpInst::makeConstantRange(Cond, RHSC->getValue()->getValue()));

        const SCEV *Ret = AddRec->getNumIterationsInRange(CompRange, *this);
        if (!isa<SCEVCouldNotCompute>(Ret)) return Ret;
      }

  switch (Cond) {
  case ICmpInst::ICMP_NE: {                     // while (X != Y)
    // Convert to: while (X-Y != 0)
    const SCEV *TC = HowFarToZero(getMinusSCEV(LHS, RHS), L);
    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
    break;
  }
  case ICmpInst::ICMP_EQ: {                     // while (X == Y)
    // Convert to: while (X-Y == 0)
    const SCEV *TC = HowFarToNonZero(getMinusSCEV(LHS, RHS), L);
    if (!isa<SCEVCouldNotCompute>(TC)) return TC;
    break;
  }
  case ICmpInst::ICMP_SLT: {
    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, true);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_SGT: {
    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
                                             getNotSCEV(RHS), L, true);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_ULT: {
    BackedgeTakenInfo BTI = HowManyLessThans(LHS, RHS, L, false);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  case ICmpInst::ICMP_UGT: {
    BackedgeTakenInfo BTI = HowManyLessThans(getNotSCEV(LHS),
                                             getNotSCEV(RHS), L, false);
    if (BTI.hasAnyInfo()) return BTI;
    break;
  }
  default:
#if 0
    errs() << "ComputeBackedgeTakenCount ";
    if (ExitCond->getOperand(0)->getType()->isUnsigned())
      errs() << "[unsigned] ";
    errs() << *LHS << "   "
           << Instruction::getOpcodeName(Instruction::ICmp)
           << "   " << *RHS << "\n";
#endif
    break;
  }
  return
    ComputeBackedgeTakenCountExhaustively(L, ExitCond, !L->contains(TBB));
}

static ConstantInt *
EvaluateConstantChrecAtConstant(const SCEVAddRecExpr *AddRec, ConstantInt *C,
                                ScalarEvolution &SE) {
  const SCEV *InVal = SE.getConstant(C);
  const SCEV *Val = AddRec->evaluateAtIteration(InVal, SE);
  assert(isa<SCEVConstant>(Val) &&
         "Evaluation of SCEV at constant didn't fold correctly?");
  return cast<SCEVConstant>(Val)->getValue();
}
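
// Worked example (illustrative note, not in the original code): for the
// affine chrec {5,+,3} -- start value 5, stepping by 3 each iteration --
// evaluating at the constant iteration number 4 folds to 5 + 3*4 = 17.
// Higher-degree chrecs fold the same way through binomial coefficients.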

/// GetAddressedElementFromGlobal - Given a global variable with an initializer
/// and a GEP expression (missing the pointer index) indexing into it, return
/// the addressed element of the initializer or null if the index expression is
/// not valid.
static Constant *
GetAddressedElementFromGlobal(LLVMContext *Context, GlobalVariable *GV,
                              const std::vector<ConstantInt*> &Indices) {
  Constant *Init = GV->getInitializer();
  for (unsigned i = 0, e = Indices.size(); i != e; ++i) {
    uint64_t Idx = Indices[i]->getZExtValue();
    if (ConstantStruct *CS = dyn_cast<ConstantStruct>(Init)) {
      assert(Idx < CS->getNumOperands() && "Bad struct index!");
      Init = cast<Constant>(CS->getOperand(Idx));
    } else if (ConstantArray *CA = dyn_cast<ConstantArray>(Init)) {
      if (Idx >= CA->getNumOperands()) return 0;  // Bogus program
      Init = cast<Constant>(CA->getOperand(Idx));
    } else if (isa<ConstantAggregateZero>(Init)) {
      if (const StructType *STy = dyn_cast<StructType>(Init->getType())) {
        assert(Idx < STy->getNumElements() && "Bad struct index!");
        Init = Context->getNullValue(STy->getElementType(Idx));
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(Init->getType())) {
        if (Idx >= ATy->getNumElements()) return 0;  // Bogus program
        Init = Context->getNullValue(ATy->getElementType());
      } else {
        LLVM_UNREACHABLE("Unknown constant aggregate type!");
      }
    } else {
      return 0; // Unknown initializer type
    }
  }
  return Init;
}

/// ComputeLoadConstantCompareBackedgeTakenCount - Given an exit condition of
/// 'icmp op load X, cst', try to see if we can compute the backedge
/// execution count.
const SCEV *
ScalarEvolution::ComputeLoadConstantCompareBackedgeTakenCount(
                                                LoadInst *LI,
                                                Constant *RHS,
                                                const Loop *L,
                                                ICmpInst::Predicate predicate) {
  if (LI->isVolatile()) return getCouldNotCompute();

  // Check to see if the loaded pointer is a getelementptr of a global.
  GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(LI->getOperand(0));
  if (!GEP) return getCouldNotCompute();

  // Make sure that it is really a constant global we are gepping, with an
  // initializer, and make sure the first IDX is really 0.
  GlobalVariable *GV = dyn_cast<GlobalVariable>(GEP->getOperand(0));
  if (!GV || !GV->isConstant() || !GV->hasInitializer() ||
      GEP->getNumOperands() < 3 || !isa<Constant>(GEP->getOperand(1)) ||
      !cast<Constant>(GEP->getOperand(1))->isNullValue())
    return getCouldNotCompute();

  // Okay, we allow one non-constant index into the GEP instruction.
  Value *VarIdx = 0;
  std::vector<ConstantInt*> Indexes;
  unsigned VarIdxNum = 0;
  for (unsigned i = 2, e = GEP->getNumOperands(); i != e; ++i)
    if (ConstantInt *CI = dyn_cast<ConstantInt>(GEP->getOperand(i))) {
      Indexes.push_back(CI);
    } else if (!isa<ConstantInt>(GEP->getOperand(i))) {
      if (VarIdx) return getCouldNotCompute();  // Multiple non-constant idx's.
      VarIdx = GEP->getOperand(i);
      VarIdxNum = i-2;
      Indexes.push_back(0);
    }

  // Okay, we know we have a (load (gep GV, 0, X)) comparison with a constant.
  // Check to see if X is a loop-variant variable value now.
  const SCEV *Idx = getSCEV(VarIdx);
  Idx = getSCEVAtScope(Idx, L);

  // We can only recognize very limited forms of loop index expressions, in
  // particular, only affine AddRec's like {C1,+,C2}.
  const SCEVAddRecExpr *IdxExpr = dyn_cast<SCEVAddRecExpr>(Idx);
  if (!IdxExpr || !IdxExpr->isAffine() || IdxExpr->isLoopInvariant(L) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(0)) ||
      !isa<SCEVConstant>(IdxExpr->getOperand(1)))
    return getCouldNotCompute();

  unsigned MaxSteps = MaxBruteForceIterations;
  for (unsigned IterationNum = 0; IterationNum != MaxSteps; ++IterationNum) {
    ConstantInt *ItCst =
      ConstantInt::get(cast<IntegerType>(IdxExpr->getType()), IterationNum);
    ConstantInt *Val = EvaluateConstantChrecAtConstant(IdxExpr, ItCst, *this);

    // Form the GEP offset.
    Indexes[VarIdxNum] = Val;

    Constant *Result = GetAddressedElementFromGlobal(Context, GV, Indexes);
    if (Result == 0) break;  // Cannot compute!

    // Evaluate the condition for this iteration.
    Result = ConstantExpr::getICmp(predicate, Result, RHS);
    if (!isa<ConstantInt>(Result)) break;  // Couldn't decide for sure
    if (cast<ConstantInt>(Result)->getValue().isMinValue()) {
#if 0
      errs() << "\n***\n*** Computed loop count " << *ItCst
             << "\n*** From global " << *GV << "*** BB: " << *L->getHeader()
             << "***\n";
#endif
      ++NumArrayLenItCounts;
      return getConstant(ItCst);   // Found terminating iteration!
    }
  }
  return getCouldNotCompute();
}
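
// Worked example (illustrative note, not in the original code): for
//   static const char S[] = "abc";
//   for (i = 0; S[i] != 0; ++i) ...
// the index chrec is {0,+,1}; brute-forcing iterations 0..3 loads 'a', 'b',
// 'c', and then the NUL terminator, at which point the comparison folds to
// false and the constant count 3 is returned.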

/// CanConstantFold - Return true if we can constant fold an instruction of the
/// specified type, assuming that all operands were constants.
static bool CanConstantFold(const Instruction *I) {
  if (isa<BinaryOperator>(I) || isa<CmpInst>(I) ||
      isa<SelectInst>(I) || isa<CastInst>(I) || isa<GetElementPtrInst>(I))
    return true;

  if (const CallInst *CI = dyn_cast<CallInst>(I))
    if (const Function *F = CI->getCalledFunction())
      return canConstantFoldCallTo(F);
  return false;
}

/// getConstantEvolvingPHI - Given an LLVM value and a loop, return a PHI node
/// in the loop that V is derived from.  We allow arbitrary operations along the
/// way, but the operands of an operation must either be constants or a value
/// derived from a constant PHI.  If this expression does not fit with these
/// constraints, return null.
static PHINode *getConstantEvolvingPHI(Value *V, const Loop *L) {
  // If this is not an instruction, or if this is an instruction outside of the
  // loop, it can't be derived from a loop PHI.
  Instruction *I = dyn_cast<Instruction>(V);
  if (I == 0 || !L->contains(I->getParent())) return 0;

  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    if (L->getHeader() == I->getParent())
      return PN;
    else
      // We don't currently keep track of the control flow needed to evaluate
      // PHIs, so we cannot handle PHIs inside of loops.
      return 0;
  }

  // If we won't be able to constant fold this expression even if the operands
  // are constants, return early.
  if (!CanConstantFold(I)) return 0;

  // Otherwise, we can evaluate this instruction if all of its operands are
  // constant or derived from a PHI node themselves.
  PHINode *PHI = 0;
  for (unsigned Op = 0, e = I->getNumOperands(); Op != e; ++Op)
    if (!(isa<Constant>(I->getOperand(Op)) ||
          isa<GlobalValue>(I->getOperand(Op)))) {
      PHINode *P = getConstantEvolvingPHI(I->getOperand(Op), L);
      if (P == 0) return 0;  // Not evolving from PHI
      if (PHI == 0)
        PHI = P;
      else if (PHI != P)
        return 0;  // Evolving from multiple different PHIs.
    }

  // This is an expression evolving from a constant PHI!
  return PHI;
}
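
// Worked example (illustrative note, not in the original code): in
//   %i = phi i32 [ 0, %preheader ], [ %i.next, %latch ]
//   %i.next = add i32 %i, 1
// getConstantEvolvingPHI(%i.next, L) returns %i: the add is foldable and its
// only non-constant operand chains back to the single header PHI.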

/// EvaluateExpression - Given an expression that passes the
/// getConstantEvolvingPHI predicate, evaluate its value assuming the PHI node
/// in the loop has the value PHIVal.  If we can't fold this expression for some
/// reason, return null.
static Constant *EvaluateExpression(Value *V, Constant *PHIVal) {
  if (isa<PHINode>(V)) return PHIVal;
  if (Constant *C = dyn_cast<Constant>(V)) return C;
  if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) return GV;
  Instruction *I = cast<Instruction>(V);
  LLVMContext *Context = I->getParent()->getContext();

  std::vector<Constant*> Operands;
  Operands.resize(I->getNumOperands());

  for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
    Operands[i] = EvaluateExpression(I->getOperand(i), PHIVal);
    if (Operands[i] == 0) return 0;
  }

  if (const CmpInst *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(),
                                           &Operands[0], Operands.size(),
                                           Context);
  else
    return ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                    &Operands[0], Operands.size(),
                                    Context);
}

/// getConstantEvolutionLoopExitValue - If we know that the specified Phi is
/// in the header of its containing loop, we know the loop executes a
/// constant number of times, and the PHI node is just a recurrence
/// involving constants, fold it.
Constant *
ScalarEvolution::getConstantEvolutionLoopExitValue(PHINode *PN,
                                                   const APInt &BEs,
                                                   const Loop *L) {
  std::map<PHINode*, Constant*>::iterator I =
    ConstantEvolutionLoopExitValue.find(PN);
  if (I != ConstantEvolutionLoopExitValue.end())
    return I->second;

  if (BEs.ugt(APInt(BEs.getBitWidth(), MaxBruteForceIterations)))
    return ConstantEvolutionLoopExitValue[PN] = 0;  // Not going to evaluate it.

  Constant *&RetVal = ConstantEvolutionLoopExitValue[PN];

  // Since the loop is canonicalized, the PHI node must have two entries.  One
  // entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  Constant *StartCST =
    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
  if (StartCST == 0)
    return RetVal = 0;  // Must be a constant.

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
  if (PN2 != PN)
    return RetVal = 0;  // Not derived from same PHI.

  // Execute the loop symbolically to determine the exit value.
  if (BEs.getActiveBits() >= 32)
    return RetVal = 0; // More than 2^32-1 iterations?? Not doing it!

  unsigned NumIterations = BEs.getZExtValue(); // must be in range
  unsigned IterationNum = 0;
  for (Constant *PHIVal = StartCST; ; ++IterationNum) {
    if (IterationNum == NumIterations)
      return RetVal = PHIVal;  // Got exit value!

    // Compute the value of the PHI node for the next iteration.
    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
    if (NextPHI == PHIVal)
      return RetVal = NextPHI;  // Stopped evolving!
    if (NextPHI == 0)
      return 0;  // Couldn't evaluate!
    PHIVal = NextPHI;
  }
}
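
// Worked example (illustrative note, not in the original code): for a header
// PHI x = phi [ 1, preheader ], [ x*2, latch ] with a known backedge-taken
// count of 4, the evaluator iterates 1 -> 2 -> 4 -> 8 -> 16 and returns the
// constant 16 as the exit value of x.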

/// ComputeBackedgeTakenCountExhaustively - If the loop is known to execute a
/// constant number of times (the condition evolves only from constants),
/// try to evaluate a few iterations of the loop until the exit condition
/// gets a value of ExitWhen (true or false).  If we cannot evaluate the
/// trip count of the loop, return getCouldNotCompute().
const SCEV *
ScalarEvolution::ComputeBackedgeTakenCountExhaustively(const Loop *L,
                                                       Value *Cond,
                                                       bool ExitWhen) {
  PHINode *PN = getConstantEvolvingPHI(Cond, L);
  if (PN == 0) return getCouldNotCompute();

  // Since the loop is canonicalized, the PHI node must have two entries.  One
  // entry must be a constant (coming in from outside of the loop), and the
  // second must be derived from the same PHI.
  bool SecondIsBackedge = L->contains(PN->getIncomingBlock(1));
  Constant *StartCST =
    dyn_cast<Constant>(PN->getIncomingValue(!SecondIsBackedge));
  if (StartCST == 0) return getCouldNotCompute();  // Must be a constant.

  Value *BEValue = PN->getIncomingValue(SecondIsBackedge);
  PHINode *PN2 = getConstantEvolvingPHI(BEValue, L);
  if (PN2 != PN) return getCouldNotCompute();  // Not derived from same PHI.

  // Okay, we found a PHI node that defines the trip count of this loop.
  // Execute the loop symbolically to determine when the condition gets a
  // value of ExitWhen.
  unsigned IterationNum = 0;
  unsigned MaxIterations = MaxBruteForceIterations;   // Limit analysis.
  for (Constant *PHIVal = StartCST;
       IterationNum != MaxIterations; ++IterationNum) {
    ConstantInt *CondVal =
      dyn_cast_or_null<ConstantInt>(EvaluateExpression(Cond, PHIVal));

    // Couldn't symbolically evaluate.
    if (!CondVal) return getCouldNotCompute();

    if (CondVal->getValue() == uint64_t(ExitWhen)) {
      ++NumBruteForceTripCountsComputed;
      return getConstant(Type::Int32Ty, IterationNum);
    }

    // Compute the value of the PHI node for the next iteration.
    Constant *NextPHI = EvaluateExpression(BEValue, PHIVal);
    if (NextPHI == 0 || NextPHI == PHIVal)
      return getCouldNotCompute();  // Couldn't evaluate or not making progress.
    PHIVal = NextPHI;
  }

  // Too many iterations were needed to evaluate.
  return getCouldNotCompute();
}

/// getSCEVAtScope - Return a SCEV expression handle for the specified value
/// at the specified scope in the program.  The L value specifies a loop
/// nest to evaluate the expression at, where null is the top-level or a
/// specified loop is immediately inside of the loop.
///
/// This method can be used to compute the exit value for a variable defined
/// in a loop by querying what the value will hold in the parent loop.
///
/// In the case that a relevant loop exit value cannot be computed, the
/// original value V is returned.
const SCEV *ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
  // FIXME: this should be turned into a virtual method on SCEV!

  if (isa<SCEVConstant>(V)) return V;

  // If this instruction is evolved from a constant-evolving PHI, compute the
  // exit value from the loop without using SCEVs.
  if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V)) {
    if (Instruction *I = dyn_cast<Instruction>(SU->getValue())) {
      const Loop *LI = (*this->LI)[I->getParent()];
      if (LI && LI->getParentLoop() == L)  // Looking for loop exit value.
        if (PHINode *PN = dyn_cast<PHINode>(I))
          if (PN->getParent() == LI->getHeader()) {
            // Okay, there is no closed form solution for the PHI node.  Check
            // to see if the loop that contains it has a known backedge-taken
            // count.  If so, we may be able to force computation of the exit
            // value.
            const SCEV *BackedgeTakenCount = getBackedgeTakenCount(LI);
            if (const SCEVConstant *BTCC =
                  dyn_cast<SCEVConstant>(BackedgeTakenCount)) {
              // Okay, we know how many times the containing loop executes.  If
              // this is a constant evolving PHI node, get the final value at
              // the specified iteration number.
              Constant *RV = getConstantEvolutionLoopExitValue(PN,
                                                   BTCC->getValue()->getValue(),
                                                               LI);
              if (RV) return getSCEV(RV);
            }
          }

      // Okay, this is an expression that we cannot symbolically evaluate
      // into a SCEV.  Check to see if it's possible to symbolically evaluate
      // the arguments into constants, and if so, try to constant propagate the
      // result.  This is particularly useful for computing loop exit values.
      if (CanConstantFold(I)) {
        // Check to see if we've folded this instruction at this loop before.
        std::map<const Loop *, Constant *> &Values = ValuesAtScopes[I];
        std::pair<std::map<const Loop *, Constant *>::iterator, bool> Pair =
          Values.insert(std::make_pair(L, static_cast<Constant *>(0)));
        if (!Pair.second)
          return Pair.first->second ? &*getSCEV(Pair.first->second) : V;

        std::vector<Constant*> Operands;
        Operands.reserve(I->getNumOperands());
        for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i) {
          Value *Op = I->getOperand(i);
          if (Constant *C = dyn_cast<Constant>(Op)) {
            Operands.push_back(C);
          } else {
            // If any of the operands is non-constant and if they are
            // non-integer and non-pointer, don't even try to analyze them
            // with scev techniques.
            if (!isSCEVable(Op->getType()))
              return V;

            const SCEV *OpV = getSCEVAtScope(getSCEV(Op), L);
            if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(OpV)) {
              Constant *C = SC->getValue();
              if (C->getType() != Op->getType())
                C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                                  Op->getType(),
                                                                  false),
                                          C, Op->getType());
              Operands.push_back(C);
            } else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(OpV)) {
              if (Constant *C = dyn_cast<Constant>(SU->getValue())) {
                if (C->getType() != Op->getType())
                  C =
                    ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                                  Op->getType(),
                                                                  false),
                                          C, Op->getType());
                Operands.push_back(C);
              } else
                return V;
            } else {
              return V;
            }
          }
        }

        Constant *C;
        if (const CmpInst *CI = dyn_cast<CmpInst>(I))
          C = ConstantFoldCompareInstOperands(CI->getPredicate(),
                                              &Operands[0], Operands.size(),
                                              Context);
        else
          C = ConstantFoldInstOperands(I->getOpcode(), I->getType(),
                                       &Operands[0], Operands.size(), Context);
        Pair.first->second = C;
        return getSCEV(C);
      }
    }

    // This is some other type of SCEVUnknown, just return it.
    return V;
  }

  if (const SCEVCommutativeExpr *Comm = dyn_cast<SCEVCommutativeExpr>(V)) {
    // Avoid performing the look-up in the common case where the specified
    // expression has no loop-variant portions.
    for (unsigned i = 0, e = Comm->getNumOperands(); i != e; ++i) {
      const SCEV *OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
      if (OpAtScope != Comm->getOperand(i)) {
        // Okay, at least one of these operands is loop variant but might be
        // foldable.  Build a new instance of the folded commutative expression.
        SmallVector<const SCEV *, 8> NewOps(Comm->op_begin(),
                                            Comm->op_begin()+i);
        NewOps.push_back(OpAtScope);

        for (++i; i != e; ++i) {
          OpAtScope = getSCEVAtScope(Comm->getOperand(i), L);
          NewOps.push_back(OpAtScope);
        }
        if (isa<SCEVAddExpr>(Comm))
          return getAddExpr(NewOps);
        if (isa<SCEVMulExpr>(Comm))
          return getMulExpr(NewOps);
        if (isa<SCEVSMaxExpr>(Comm))
          return getSMaxExpr(NewOps);
        if (isa<SCEVUMaxExpr>(Comm))
          return getUMaxExpr(NewOps);
        LLVM_UNREACHABLE("Unknown commutative SCEV type!");
      }
    }
    // If we got here, all operands are loop invariant.
    return Comm;
  }

  if (const SCEVUDivExpr *Div = dyn_cast<SCEVUDivExpr>(V)) {
    const SCEV *LHS = getSCEVAtScope(Div->getLHS(), L);
    const SCEV *RHS = getSCEVAtScope(Div->getRHS(), L);
    if (LHS == Div->getLHS() && RHS == Div->getRHS())
      return Div;   // must be loop invariant
    return getUDivExpr(LHS, RHS);
  }

  // If this is a loop recurrence for a loop that does not contain L, then we
  // are dealing with the final value computed by the loop.
  if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V)) {
    if (!L || !AddRec->getLoop()->contains(L->getHeader())) {
      // To evaluate this recurrence, we need to know how many times the AddRec
      // loop iterates.  Compute this now.
      const SCEV *BackedgeTakenCount = getBackedgeTakenCount(AddRec->getLoop());
      if (BackedgeTakenCount == getCouldNotCompute()) return AddRec;

      // Then, evaluate the AddRec.
      return AddRec->evaluateAtIteration(BackedgeTakenCount, *this);
    }
    return AddRec;
  }

  if (const SCEVZeroExtendExpr *Cast = dyn_cast<SCEVZeroExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getZeroExtendExpr(Op, Cast->getType());
  }

  if (const SCEVSignExtendExpr *Cast = dyn_cast<SCEVSignExtendExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getSignExtendExpr(Op, Cast->getType());
  }

  if (const SCEVTruncateExpr *Cast = dyn_cast<SCEVTruncateExpr>(V)) {
    const SCEV *Op = getSCEVAtScope(Cast->getOperand(), L);
    if (Op == Cast->getOperand())
      return Cast;  // must be loop invariant
    return getTruncateExpr(Op, Cast->getType());
  }

  LLVM_UNREACHABLE("Unknown SCEV type!");
  return 0;
}

/// getSCEVAtScope - This is a convenience function which does
/// getSCEVAtScope(getSCEV(V), L).
const SCEV *ScalarEvolution::getSCEVAtScope(Value *V, const Loop *L) {
  return getSCEVAtScope(getSCEV(V), L);
}

/// SolveLinEquationWithOverflow - Finds the minimum unsigned root of the
/// following equation:
///
///     A * X = B (mod N)
///
/// where N = 2^BW and BW is the common bit width of A and B.  The signedness
/// of A and B isn't important.
///
/// If the equation does not have a solution, SCEVCouldNotCompute is returned.
static const SCEV *SolveLinEquationWithOverflow(const APInt &A, const APInt &B,
                                                ScalarEvolution &SE) {
  uint32_t BW = A.getBitWidth();
  assert(BW == B.getBitWidth() && "Bit widths must be the same.");
  assert(A != 0 && "A must be non-zero.");

  // 1. D = gcd(A, N)
  //
  // The gcd of A and N may have only one prime factor: 2.  The number of
  // trailing zeros in A is its multiplicity.
  uint32_t Mult2 = A.countTrailingZeros();
  // D = 2^Mult2

  // 2. Check if B is divisible by D.
  //
  // B is divisible by D if and only if the multiplicity of prime factor 2 for
  // B is not less than multiplicity of this prime factor for D.
  if (B.countTrailingZeros() < Mult2)
    return SE.getCouldNotCompute();

  // 3. Compute I: the multiplicative inverse of (A / D) in arithmetic
  // modulo (N / D).
  //
  // (N / D) may need BW+1 bits in its representation.  Hence, we'll use this
  // bit width during computations.
  APInt AD = A.lshr(Mult2).zext(BW + 1);  // AD = A / D
  APInt Mod(BW + 1, 0);
  Mod.set(BW - Mult2);  // Mod = N / D
  APInt I = AD.multiplicativeInverse(Mod);

  // 4. Compute the minimum unsigned root of the equation:
  //    I * (B / D) mod (N / D)
  APInt Result = (I * B.lshr(Mult2).zext(BW + 1)).urem(Mod);

  // The result is guaranteed to be less than 2^BW, so we may truncate it to
  // BW bits.
  return SE.getConstant(Result.trunc(BW));
}
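
// Worked example (illustrative note, not in the original code): with BW = 8,
// solve 4 * X = 8 (mod 256).  D = gcd(4, 256) = 4 (Mult2 = 2); B = 8 has at
// least two trailing zeros, so a solution exists.  A/D = 1, N/D = 64, the
// inverse I = 1, and Result = (1 * (8 >> 2)) mod 64 = 2.  Indeed 4 * 2 = 8,
// and the full solution set is X = 2 + 64k, of which 2 is the minimum.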

/// SolveQuadraticEquation - Find the roots of the quadratic equation for the
/// given quadratic chrec {L,+,M,+,N}.  This returns either the two roots
/// (which might be the same) or two SCEVCouldNotCompute objects.
///
static std::pair<const SCEV *,const SCEV *>
SolveQuadraticEquation(const SCEVAddRecExpr *AddRec, ScalarEvolution &SE) {
  assert(AddRec->getNumOperands() == 3 && "This is not a quadratic chrec!");
  const SCEVConstant *LC = dyn_cast<SCEVConstant>(AddRec->getOperand(0));
  const SCEVConstant *MC = dyn_cast<SCEVConstant>(AddRec->getOperand(1));
  const SCEVConstant *NC = dyn_cast<SCEVConstant>(AddRec->getOperand(2));

  // We currently can only solve this if the coefficients are constants.
  if (!LC || !MC || !NC) {
    const SCEV *CNC = SE.getCouldNotCompute();
    return std::make_pair(CNC, CNC);
  }

  uint32_t BitWidth = LC->getValue()->getValue().getBitWidth();
  const APInt &L = LC->getValue()->getValue();
  const APInt &M = MC->getValue()->getValue();
  const APInt &N = NC->getValue()->getValue();
  APInt Two(BitWidth, 2);
  APInt Four(BitWidth, 4);

  {
  using namespace APIntOps;
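
  // Derivation (illustrative note, not in the original code): the chrec
  // {L,+,M,+,N} evaluated at iteration x is L + M*x + N*x*(x-1)/2, so in
  // standard AX^2+BX+C form the coefficients are A = N/2, B = M - N/2, and
  // C = L, which is exactly what is computed below.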
  const APInt& C = L;
  // Convert from chrec coefficients to polynomial coefficients AX^2+BX+C
  // The B coefficient is M-N/2
  APInt B(M);
  B -= N.sdiv(Two);

  // The A coefficient is N/2
  APInt A(N.sdiv(Two));

  // Compute the B^2-4ac term.
  APInt SqrtTerm(B);
  SqrtTerm *= B;
  SqrtTerm -= Four * (A * C);

  // Compute sqrt(B^2-4ac).  This is guaranteed to be the nearest
  // integer value or else APInt::sqrt() will assert.
  APInt SqrtVal(SqrtTerm.sqrt());

  // Compute the two solutions for the quadratic formula.
  // The divisions must be performed as signed divisions.
  APInt NegB(-B);
  APInt TwoA( A << 1 );
  if (TwoA.isMinValue()) {
    const SCEV *CNC = SE.getCouldNotCompute();
    return std::make_pair(CNC, CNC);
  }

  LLVMContext *Context = SE.getContext();

  ConstantInt *Solution1 =
    Context->getConstantInt((NegB + SqrtVal).sdiv(TwoA));
  ConstantInt *Solution2 =
    Context->getConstantInt((NegB - SqrtVal).sdiv(TwoA));

  return std::make_pair(SE.getConstant(Solution1),
                        SE.getConstant(Solution2));
  } // end APIntOps namespace
}

/// HowFarToZero - Return the number of times a backedge comparing the
/// specified value to zero will execute.  If not computable, return
/// CouldNotCompute.
const SCEV *ScalarEvolution::HowFarToZero(const SCEV *V, const Loop *L) {
  // If the value is a constant
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    // If the value is already zero, the branch will execute zero times.
    if (C->getValue()->isZero()) return C;
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(V);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  if (AddRec->isAffine()) {
    // If this is an affine expression, the execution count of this branch is
    // the minimum unsigned root of the following equation:
    //
    //     Start + Step*N = 0 (mod 2^BW)
    //
    // equivalent to:
    //
    //             Step*N = -Start (mod 2^BW)
    //
    // where BW is the common bit width of Start and Step.

    // Get the initial value for the loop.
    const SCEV *Start = getSCEVAtScope(AddRec->getStart(),
                                       L->getParentLoop());
    const SCEV *Step = getSCEVAtScope(AddRec->getOperand(1),
                                      L->getParentLoop());

    if (const SCEVConstant *StepC = dyn_cast<SCEVConstant>(Step)) {
      // For now we handle only constant steps.

      // First, handle unitary steps.
      if (StepC->getValue()->equalsInt(1))      // 1*N = -Start (mod 2^BW), so:
        return getNegativeSCEV(Start);          //   N = -Start (as unsigned)
      if (StepC->getValue()->isAllOnesValue())  // -1*N = -Start (mod 2^BW), so:
        return Start;                           //    N = Start (as unsigned)

      // Then, try to solve the above equation provided that Start is constant.
      if (const SCEVConstant *StartC = dyn_cast<SCEVConstant>(Start))
        return SolveLinEquationWithOverflow(StepC->getValue()->getValue(),
                                            -StartC->getValue()->getValue(),
                                            *this);
    }
  } else if (AddRec->isQuadratic() && AddRec->getType()->isInteger()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it.
    std::pair<const SCEV *,const SCEV *> Roots = SolveQuadraticEquation(AddRec,
                                                                        *this);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);
    if (R1) {
#if 0
      errs() << "HFTZ: " << *V << " - sol#1: " << *R1
             << "  sol#2: " << *R2 << "\n";
#endif
      // Pick the smallest positive root value.
      if (ConstantInt *CB =
          dyn_cast<ConstantInt>(Context->getConstantExprICmp(ICmpInst::ICMP_ULT,
                                 R1->getValue(), R2->getValue()))) {
        if (CB->getZExtValue() == false)
          std::swap(R1, R2);   // R1 is the minimum root now.

        // We can only use this value if the chrec ends up with an exact zero
        // value at this index.  When solving for "X*X != 5", for example, we
        // should not accept a root of 2.
        const SCEV *Val = AddRec->evaluateAtIteration(R1, *this);
        if (Val->isZero())
          return R1;  // We found a quadratic root!
      }
    }
  }

  return getCouldNotCompute();
}
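
// Worked example (illustrative note, not in the original code): a loop
// "for (i = 0; i != 100; ++i)" reaches this path as
// HowFarToZero({-100,+,1}).  The step is the unit constant, so the count is
// -Start, i.e. -(-100) = 100 backedge executions.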

/// HowFarToNonZero - Return the number of times a backedge checking the
/// specified value for nonzero will execute.  If not computable, return
/// CouldNotCompute.
const SCEV *ScalarEvolution::HowFarToNonZero(const SCEV *V, const Loop *L) {
  // Loops that look like: while (X == 0) are very strange indeed.  We don't
  // handle them yet except for the trivial case.  This could be expanded in
  // the future as needed.

  // If the value is a constant, check to see if it is known to be non-zero
  // already.  If so, the backedge will execute zero times.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(V)) {
    if (!C->getValue()->isNullValue())
      return getIntegerSCEV(0, C->getType());
    return getCouldNotCompute();  // Otherwise it will loop infinitely.
  }

  // We could implement others, but I really doubt anyone writes loops like
  // this, and if they did, they would already be constant folded.
  return getCouldNotCompute();
}

/// getLoopPredecessor - If the given loop's header has exactly one unique
/// predecessor outside the loop, return it.  Otherwise return null.
///
BasicBlock *ScalarEvolution::getLoopPredecessor(const Loop *L) {
  BasicBlock *Header = L->getHeader();
  BasicBlock *Pred = 0;
  for (pred_iterator PI = pred_begin(Header), E = pred_end(Header);
       PI != E; ++PI)
    if (!L->contains(*PI)) {
      if (Pred && Pred != *PI) return 0; // Multiple predecessors.
      Pred = *PI;
    }
  return Pred;
}

/// getPredecessorWithUniqueSuccessorForBB - Return a predecessor of BB
/// (which may not be an immediate predecessor) which has exactly one
/// successor from which BB is reachable, or null if no such block is
/// found.
///
BasicBlock *
ScalarEvolution::getPredecessorWithUniqueSuccessorForBB(BasicBlock *BB) {
  // If the block has a unique predecessor, then there is no path from the
  // predecessor to the block that does not go through the direct edge
  // from the predecessor to the block.
  if (BasicBlock *Pred = BB->getSinglePredecessor())
    return Pred;

  // A loop's header is defined to be a block that dominates the loop.
  // If the header has a unique predecessor outside the loop, it must be
  // a block that has exactly one successor that can reach the loop.
  if (Loop *L = LI->getLoopFor(BB))
    return getLoopPredecessor(L);

  return 0;
}

/// HasSameValue - SCEV structural equivalence is usually sufficient for
/// testing whether two expressions are equal, however for the purposes of
/// looking for a condition guarding a loop, it can be useful to be a little
/// more general, since a front-end may have replicated the controlling
/// expression.
///
static bool HasSameValue(const SCEV *A, const SCEV *B) {
  // Quick check to see if they are the same SCEV.
  if (A == B) return true;

  // Otherwise, if they're both SCEVUnknown, it's possible that they hold
  // two different instructions with the same value.  Check for this case.
  if (const SCEVUnknown *AU = dyn_cast<SCEVUnknown>(A))
    if (const SCEVUnknown *BU = dyn_cast<SCEVUnknown>(B))
      if (const Instruction *AI = dyn_cast<Instruction>(AU->getValue()))
        if (const Instruction *BI = dyn_cast<Instruction>(BU->getValue()))
          if (AI->isIdenticalTo(BI))
            return true;

  // Otherwise assume they may have a different value.
  return false;
}

/// isLoopGuardedByCond - Test whether entry to the loop is protected by
/// a conditional between LHS and RHS.  This is used to help avoid max
/// expressions in loop trip counts.
bool ScalarEvolution::isLoopGuardedByCond(const Loop *L,
                                          ICmpInst::Predicate Pred,
                                          const SCEV *LHS, const SCEV *RHS) {
  // Interpret a null as meaning no loop, where there is obviously no guard
  // (interprocedural conditions notwithstanding).
  if (!L) return false;

  BasicBlock *Predecessor = getLoopPredecessor(L);
  BasicBlock *PredecessorDest = L->getHeader();

  // Starting at the loop predecessor, climb up the predecessor chain, as long
  // as there are predecessors that can be found that have unique successors
  // leading to the original header.
  for (; Predecessor;
       PredecessorDest = Predecessor,
       Predecessor = getPredecessorWithUniqueSuccessorForBB(Predecessor)) {

    BranchInst *LoopEntryPredicate =
      dyn_cast<BranchInst>(Predecessor->getTerminator());
    if (!LoopEntryPredicate ||
        LoopEntryPredicate->isUnconditional())
      continue;

    if (isNecessaryCond(LoopEntryPredicate->getCondition(), Pred, LHS, RHS,
                        LoopEntryPredicate->getSuccessor(0) != PredecessorDest))
      return true;
  }

  return false;
}

/// isNecessaryCond - Test whether the given CondValue value is a condition
/// which is at least as strict as the one described by Pred, LHS, and RHS.
bool ScalarEvolution::isNecessaryCond(Value *CondValue,
                                      ICmpInst::Predicate Pred,
                                      const SCEV *LHS, const SCEV *RHS,
                                      bool Inverse) {
  // Recursively handle And and Or conditions.
  if (BinaryOperator *BO = dyn_cast<BinaryOperator>(CondValue)) {
    if (BO->getOpcode() == Instruction::And) {
      if (!Inverse)
        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
    } else if (BO->getOpcode() == Instruction::Or) {
      if (Inverse)
        return isNecessaryCond(BO->getOperand(0), Pred, LHS, RHS, Inverse) ||
               isNecessaryCond(BO->getOperand(1), Pred, LHS, RHS, Inverse);
    }
  }

  ICmpInst *ICI = dyn_cast<ICmpInst>(CondValue);
  if (!ICI) return false;

  // Now that we found a conditional branch that dominates the loop, check to
  // see if it is the comparison we are looking for.
  Value *PreCondLHS = ICI->getOperand(0);
  Value *PreCondRHS = ICI->getOperand(1);
  ICmpInst::Predicate Cond;
  if (Inverse)
    Cond = ICI->getInversePredicate();
  else
    Cond = ICI->getPredicate();

  if (Cond == Pred)
    ; // An exact match.
  else if (!ICmpInst::isTrueWhenEqual(Cond) && Pred == ICmpInst::ICMP_NE)
    ; // The actual condition is beyond sufficient.
  else
    // Check a few special cases.
    switch (Cond) {
    case ICmpInst::ICMP_UGT:
      if (Pred == ICmpInst::ICMP_ULT) {
        std::swap(PreCondLHS, PreCondRHS);
        Cond = ICmpInst::ICMP_ULT;
        break;
      }
      return false;
    case ICmpInst::ICMP_SGT:
      if (Pred == ICmpInst::ICMP_SLT) {
        std::swap(PreCondLHS, PreCondRHS);
        Cond = ICmpInst::ICMP_SLT;
        break;
      }
      return false;
    case ICmpInst::ICMP_NE:
      // Expressions like (x >u 0) are often canonicalized to (x != 0),
      // so check for this case by checking if the NE is comparing against
      // a minimum or maximum constant.
      if (!ICmpInst::isTrueWhenEqual(Pred))
        if (ConstantInt *CI = dyn_cast<ConstantInt>(PreCondRHS)) {
          const APInt &A = CI->getValue();
          switch (Pred) {
          case ICmpInst::ICMP_SLT:
            if (A.isMaxSignedValue()) break;
            return false;
          case ICmpInst::ICMP_SGT:
            if (A.isMinSignedValue()) break;
            return false;
          case ICmpInst::ICMP_ULT:
            if (A.isMaxValue()) break;
            return false;
          case ICmpInst::ICMP_UGT:
            if (A.isMinValue()) break;
            return false;
          default:
            return false;
          }
          Cond = ICmpInst::ICMP_NE;
          // NE is symmetric but the original comparison may not be.  Swap
          // the operands if necessary so that they match below.
          if (isa<SCEVConstant>(LHS))
            std::swap(PreCondLHS, PreCondRHS);
          break;
        }
      return false;
    default:
      // We weren't able to reconcile the condition.
      return false;
    }

  if (!PreCondLHS->getType()->isInteger()) return false;

  const SCEV *PreCondLHSSCEV = getSCEV(PreCondLHS);
  const SCEV *PreCondRHSSCEV = getSCEV(PreCondRHS);
  return (HasSameValue(LHS, PreCondLHSSCEV) &&
          HasSameValue(RHS, PreCondRHSSCEV)) ||
         (HasSameValue(LHS, getNotSCEV(PreCondRHSSCEV)) &&
          HasSameValue(RHS, getNotSCEV(PreCondLHSSCEV)));
}
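
// Worked example (illustrative note, not in the original code): if the loop
// is guarded by "if (a >u b)" and the query asks whether "b <u a" holds on
// entry, the ICMP_UGT case above swaps the guard's operands into ICMP_ULT
// form so the final HasSameValue checks line up operand-for-operand.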

/// getBECount - Subtract the end and start values and divide by the step,
/// rounding up, to get the number of times the backedge is executed.  Return
/// CouldNotCompute if an intermediate computation overflows.
const SCEV *ScalarEvolution::getBECount(const SCEV *Start,
                                        const SCEV *End,
                                        const SCEV *Step) {
  const Type *Ty = Start->getType();
  const SCEV *NegOne = getIntegerSCEV(-1, Ty);
  const SCEV *Diff = getMinusSCEV(End, Start);
  const SCEV *RoundUp = getAddExpr(Step, NegOne);

  // Add an adjustment to the difference between End and Start so that
  // the division will effectively round up.
  const SCEV *Add = getAddExpr(Diff, RoundUp);

  // Check Add for unsigned overflow.
  // TODO: More sophisticated things could be done here.
  const Type *WideTy = Context->getIntegerType(getTypeSizeInBits(Ty) + 1);
  const SCEV *OperandExtendedAdd =
    getAddExpr(getZeroExtendExpr(Diff, WideTy),
               getZeroExtendExpr(RoundUp, WideTy));
  if (getZeroExtendExpr(Add, WideTy) != OperandExtendedAdd)
    return getCouldNotCompute();

  return getUDivExpr(Add, Step);
}
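
// Worked example (illustrative note, not in the original code): with
// Start = 0, End = 10, and Step = 3, the rounded-up division is
// (10 - 0 + (3 - 1)) /u 3 = 12 /u 3 = 4, i.e. ceil(10/3) = 4 executions;
// the one-bit-wider recomputation guards the 10 + 2 addition against wrap.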

/// HowManyLessThans - Return the number of times a backedge containing the
/// specified less-than comparison will execute.  If not computable, return
/// CouldNotCompute.
ScalarEvolution::BackedgeTakenInfo
ScalarEvolution::HowManyLessThans(const SCEV *LHS, const SCEV *RHS,
                                  const Loop *L, bool isSigned) {
  // Only handle:  "ADDREC < LoopInvariant".
  if (!RHS->isLoopInvariant(L)) return getCouldNotCompute();

  const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(LHS);
  if (!AddRec || AddRec->getLoop() != L)
    return getCouldNotCompute();

  if (AddRec->isAffine()) {
    // FORNOW: We only support unit strides.
    unsigned BitWidth = getTypeSizeInBits(AddRec->getType());
    const SCEV *Step = AddRec->getStepRecurrence(*this);

    // TODO: handle non-constant strides.
    const SCEVConstant *CStep = dyn_cast<SCEVConstant>(Step);
    if (!CStep || CStep->isZero())
      return getCouldNotCompute();
    if (CStep->isOne()) {
      // With unit stride, the iteration never steps past the limit value.
    } else if (CStep->getValue()->getValue().isStrictlyPositive()) {
      if (const SCEVConstant *CLimit = dyn_cast<SCEVConstant>(RHS)) {
        // Test whether a positive iteration can step past the limit
        // value and past the maximum value for its type in a single step.
        if (isSigned) {
          APInt Max = APInt::getSignedMaxValue(BitWidth);
          if ((Max - CStep->getValue()->getValue())
                .slt(CLimit->getValue()->getValue()))
            return getCouldNotCompute();
        } else {
          APInt Max = APInt::getMaxValue(BitWidth);
          if ((Max - CStep->getValue()->getValue())
                .ult(CLimit->getValue()->getValue()))
            return getCouldNotCompute();
        }
      } else
        // TODO: handle non-constant limit values below.
        return getCouldNotCompute();
    } else
      // TODO: handle negative strides below.
      return getCouldNotCompute();

    // We know the LHS is of the form {n,+,s} and the RHS is some
    // loop-invariant m.  So, we count the number of iterations in which
    // {n,+,s} < m is true.  Note that we cannot simply return max(m-n,0)/s
    // because it's not safe to treat m-n as signed nor unsigned due to
    // overflow possibility.

    // First, we get the value of the LHS in the first iteration: n
    const SCEV *Start = AddRec->getOperand(0);

    // Determine the minimum constant start value.
    const SCEV *MinStart = isa<SCEVConstant>(Start) ? Start :
      getConstant(isSigned ? APInt::getSignedMinValue(BitWidth) :
                             APInt::getMinValue(BitWidth));

    // If we know that the condition is true in order to enter the loop,
    // then we know that it will run exactly (m-n)/s times.  Otherwise, we
    // only know that it will execute (max(m,n)-n)/s times.  In both cases,
    // the division must round up.
    const SCEV *End = RHS;
    if (!isLoopGuardedByCond(L,
                             isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT,
                             getMinusSCEV(Start, Step), RHS))
      End = isSigned ? getSMaxExpr(RHS, Start)
                     : getUMaxExpr(RHS, Start);

    // Determine the maximum constant end value.
    const SCEV *MaxEnd =
      isa<SCEVConstant>(End) ? End :
      getConstant(isSigned ? APInt::getSignedMaxValue(BitWidth)
                               .ashr(GetMinSignBits(End) - 1) :
                             APInt::getMaxValue(BitWidth)
                               .lshr(GetMinLeadingZeros(End)));

    // Finally, we subtract these two values and divide, rounding up, to get
    // the number of times the backedge is executed.
    const SCEV *BECount = getBECount(Start, End, Step);

    // The maximum backedge count is similar, except using the minimum start
    // value and the maximum end value.
    const SCEV *MaxBECount = getBECount(MinStart, MaxEnd, Step);

    return BackedgeTakenInfo(BECount, MaxBECount);
  }

  return getCouldNotCompute();
}
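
// Worked example (illustrative note, not in the original code): for
// {0,+,2} <u 10 with an entry guard, the iterations with values
// 0, 2, 4, 6, 8 all satisfy the compare, and getBECount(0, 10, 2)
// = (10 - 0 + 1) /u 2 = 5 matches that count exactly.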

/// getNumIterationsInRange - Return the number of iterations of this loop
/// that produce values in the specified constant range.  Another way of
/// looking at this is that it returns the first iteration number where the
/// value is not in the range, thus computing the exit count.  If the
/// iteration count can't be computed, an instance of SCEVCouldNotCompute is
/// returned.
const SCEV *SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
                                                    ScalarEvolution &SE) const {
  if (Range.isFullSet())  // Infinite loop.
    return SE.getCouldNotCompute();

  // If the start is a non-zero constant, shift the range to simplify things.
  if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
    if (!SC->getValue()->isZero()) {
      SmallVector<const SCEV *, 4> Operands(op_begin(), op_end());
      Operands[0] = SE.getIntegerSCEV(0, SC->getType());
      const SCEV *Shifted = SE.getAddRecExpr(Operands, getLoop());
      if (const SCEVAddRecExpr *ShiftedAddRec =
            dyn_cast<SCEVAddRecExpr>(Shifted))
        return ShiftedAddRec->getNumIterationsInRange(
                           Range.subtract(SC->getValue()->getValue()), SE);
      // This is strange and shouldn't happen.
      return SE.getCouldNotCompute();
    }

  // The only time we can solve this is when we have all constant indices.
  // Otherwise, we cannot determine the overflow conditions.
  for (unsigned i = 0, e = getNumOperands(); i != e; ++i)
    if (!isa<SCEVConstant>(getOperand(i)))
      return SE.getCouldNotCompute();

  // Okay at this point we know that all elements of the chrec are constants
  // and that the start element is zero.

  // First check to see if the range contains zero.  If not, the first
  // iteration exits.
  unsigned BitWidth = SE.getTypeSizeInBits(getType());
  if (!Range.contains(APInt(BitWidth, 0)))
    return SE.getIntegerSCEV(0, getType());

  if (isAffine()) {
    // If this is an affine expression then we have this situation:
    //   Solve {0,+,A} in Range  ===  Ax in Range
    //
    // We know that zero is in the range.  If A is positive then we know that
    // the upper value of the range must be the first possible exit value.
    // If A is negative then the lower of the range is the last possible loop
    // value.  Also note that we already checked for a full range.
    APInt One(BitWidth, 1);
    APInt A = cast<SCEVConstant>(getOperand(1))->getValue()->getValue();
    APInt End = A.sge(One) ? (Range.getUpper() - One) : Range.getLower();

    // The exit value should be (End+A)/A.
    APInt ExitVal = (End + A).udiv(A);
    ConstantInt *ExitValue = SE.getContext()->getConstantInt(ExitVal);

    // Evaluate at the exit value.  If we really did fall out of the valid
    // range, then we computed our trip count, otherwise wrap around or other
    // things must have happened.
    ConstantInt *Val = EvaluateConstantChrecAtConstant(this, ExitValue, SE);
    if (Range.contains(Val->getValue()))
      return SE.getCouldNotCompute();  // Something strange happened

    // Ensure that the previous value is in the range.  This is a sanity check.
    assert(Range.contains(
           EvaluateConstantChrecAtConstant(this,
           SE.getContext()->getConstantInt(ExitVal - One), SE)->getValue()) &&
           "Linear scev computation is off in a bad way!");
    return SE.getConstant(ExitValue);
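
    // Worked example (illustrative note, not in the original code): for
    // {0,+,2} in the range [0,10), A = 2 and End = 10 - 1 = 9, so
    // ExitVal = (9 + 2) /u 2 = 5.  Iteration 5 produces 10, the first value
    // outside the range, while iteration 4 produces 8, which is still inside.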
  } else if (isQuadratic()) {
    // If this is a quadratic (3-term) AddRec {L,+,M,+,N}, find the roots of
    // the quadratic equation to solve it.  To do this, we must frame our
    // problem in terms of figuring out when zero is crossed, instead of when
    // Range.getUpper() is crossed.
    SmallVector<const SCEV *, 4> NewOps(op_begin(), op_end());
    NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
    const SCEV *NewAddRec = SE.getAddRecExpr(NewOps, getLoop());

    // Next, solve the constructed addrec
    std::pair<const SCEV *,const SCEV *> Roots =
      SolveQuadraticEquation(cast<SCEVAddRecExpr>(NewAddRec), SE);
    const SCEVConstant *R1 = dyn_cast<SCEVConstant>(Roots.first);
    const SCEVConstant *R2 = dyn_cast<SCEVConstant>(Roots.second);

    // Pick the smallest positive root value.
    if (ConstantInt *CB =
        dyn_cast<ConstantInt>(
                       SE.getContext()->getConstantExprICmp(ICmpInst::ICMP_ULT,
                                       R1->getValue(), R2->getValue()))) {
      if (CB->getZExtValue() == false)
        std::swap(R1, R2);   // R1 is the minimum root now.

      // Make sure the root is not off by one.  The returned iteration should
      // not be in the range, but the previous one should be.  When solving
      // for "X*X < 5", for example, we should not return a root of 2.
      ConstantInt *R1Val = EvaluateConstantChrecAtConstant(this,
                                                           R1->getValue(),
                                                           SE);
      if (Range.contains(R1Val->getValue())) {
        // The next iteration must be out of the range...
        ConstantInt *NextVal =
          SE.getContext()->getConstantInt(R1->getValue()->getValue()+1);

        R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
        if (!Range.contains(R1Val->getValue()))
          return SE.getConstant(NextVal);
        return SE.getCouldNotCompute();  // Something strange happened
      }

      // If R1 was not in the range, then it is a good return value.  Make
      // sure that R1-1 WAS in the range though, just in case.
      ConstantInt *NextVal =
        SE.getContext()->getConstantInt(R1->getValue()->getValue()-1);
      R1Val = EvaluateConstantChrecAtConstant(this, NextVal, SE);
      if (Range.contains(R1Val->getValue()))
        return R1;
      return SE.getCouldNotCompute();  // Something strange happened
    }
  }

  return SE.getCouldNotCompute();
}

//===----------------------------------------------------------------------===//
//                   SCEVCallbackVH Class Implementation
//===----------------------------------------------------------------------===//

void ScalarEvolution::SCEVCallbackVH::deleted() {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");
  if (PHINode *PN = dyn_cast<PHINode>(getValPtr()))
    SE->ConstantEvolutionLoopExitValue.erase(PN);
  if (Instruction *I = dyn_cast<Instruction>(getValPtr()))
    SE->ValuesAtScopes.erase(I);
  SE->Scalars.erase(getValPtr());
  // this now dangles!
}

void ScalarEvolution::SCEVCallbackVH::allUsesReplacedWith(Value *) {
  assert(SE && "SCEVCallbackVH called with a null ScalarEvolution!");

  // Forget all the expressions associated with users of the old value,
  // so that future queries will recompute the expressions using the new
  // value.
  SmallVector<User *, 16> Worklist;
  Value *Old = getValPtr();
  bool DeleteOld = false;
  for (Value::use_iterator UI = Old->use_begin(), UE = Old->use_end();
       UI != UE; ++UI)
    Worklist.push_back(*UI);
  while (!Worklist.empty()) {
    User *U = Worklist.pop_back_val();
    // Deleting the Old value will cause this to dangle.  Postpone
    // that until everything else is done.
    if (U == Old) {
      DeleteOld = true;
      continue;
    }
    if (PHINode *PN = dyn_cast<PHINode>(U))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    if (Instruction *I = dyn_cast<Instruction>(U))
      SE->ValuesAtScopes.erase(I);
    if (SE->Scalars.erase(U))
      for (Value::use_iterator UI = U->use_begin(), UE = U->use_end();
           UI != UE; ++UI)
        Worklist.push_back(*UI);
  }
  if (DeleteOld) {
    if (PHINode *PN = dyn_cast<PHINode>(Old))
      SE->ConstantEvolutionLoopExitValue.erase(PN);
    if (Instruction *I = dyn_cast<Instruction>(Old))
      SE->ValuesAtScopes.erase(I);
    SE->Scalars.erase(Old);
    // this now dangles!
  }
}

ScalarEvolution::SCEVCallbackVH::SCEVCallbackVH(Value *V, ScalarEvolution *se)
  : CallbackVH(V), SE(se) {}

//===----------------------------------------------------------------------===//
//                   ScalarEvolution Class Implementation
//===----------------------------------------------------------------------===//

ScalarEvolution::ScalarEvolution()
  : FunctionPass(&ID) {
}

bool ScalarEvolution::runOnFunction(Function &F) {
  this->F = &F;
  LI = &getAnalysis<LoopInfo>();
  TD = getAnalysisIfAvailable<TargetData>();
  return false;
}

void ScalarEvolution::releaseMemory() {
  Scalars.clear();
  BackedgeTakenCounts.clear();
  ConstantEvolutionLoopExitValue.clear();
  ValuesAtScopes.clear();
  UniqueSCEVs.clear();
  SCEVAllocator.Reset();
}

void ScalarEvolution::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<LoopInfo>();
}

bool ScalarEvolution::hasLoopInvariantBackedgeTakenCount(const Loop *L) {
  return !isa<SCEVCouldNotCompute>(getBackedgeTakenCount(L));
}

static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
                          const Loop *L) {
  // Print all inner loops first
  for (Loop::iterator I = L->begin(), E = L->end(); I != E; ++I)
    PrintLoopInfo(OS, SE, *I);

  OS << "Loop " << L->getHeader()->getName() << ": ";

  SmallVector<BasicBlock*, 8> ExitBlocks;
  L->getExitBlocks(ExitBlocks);
  if (ExitBlocks.size() != 1)
    OS << "<multiple exits> ";

  if (SE->hasLoopInvariantBackedgeTakenCount(L)) {
    OS << "backedge-taken count is " << *SE->getBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable backedge-taken count. ";
  }

  OS << "\n";
  OS << "Loop " << L->getHeader()->getName() << ": ";

  if (!isa<SCEVCouldNotCompute>(SE->getMaxBackedgeTakenCount(L))) {
    OS << "max backedge-taken count is " << *SE->getMaxBackedgeTakenCount(L);
  } else {
    OS << "Unpredictable max backedge-taken count. ";
  }

  OS << "\n";
}

void ScalarEvolution::print(raw_ostream &OS, const Module* ) const {
  // ScalarEvolution's implementation of the print method is to print
  // out SCEV values of all instructions that are interesting.  Doing
  // this potentially causes it to create new SCEV objects though,
  // which technically conflicts with the const qualifier.  This isn't
  // observable from outside the class though, so casting away the
  // const isn't dangerous.
  ScalarEvolution &SE = *const_cast<ScalarEvolution*>(this);

  OS << "Classifying expressions for: " << F->getName() << "\n";
  for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I)
    if (isSCEVable(I->getType())) {
      OS << *I;
      OS << "  -->  ";
      const SCEV *SV = SE.getSCEV(&*I);
      SV->print(OS);

      const Loop *L = LI->getLoopFor((*I).getParent());

      const SCEV *AtUse = SE.getSCEVAtScope(SV, L);
      if (AtUse != SV) {
        OS << "  -->  ";
        AtUse->print(OS);
      }

      if (L) {
        OS << "\t\t" "Exits: ";
        const SCEV *ExitValue = SE.getSCEVAtScope(SV, L->getParentLoop());
        if (!ExitValue->isLoopInvariant(L)) {
          OS << "<<Unknown>>";
        } else {
          OS << *ExitValue;
        }
      }

      OS << "\n";
    }

  OS << "Determining loop execution counts for: " << F->getName() << "\n";
  for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
    PrintLoopInfo(OS, &SE, *I);
}

void ScalarEvolution::print(std::ostream &o, const Module *M) const {
  raw_os_ostream OS(o);