From: Dan Gohman
Date: Sun, 14 Jun 2009 22:47:23 +0000 (+0000)
Subject: Convert several parts of the ScalarEvolution framework to use
X-Git-Url: http://demsky.eecs.uci.edu/git/?a=commitdiff_plain;h=a82752c9eb5fbdd1b7276fde7349ac9453eb5a75;p=oota-llvm.git

Convert several parts of the ScalarEvolution framework to use
SmallVector instead of std::vector.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@73357 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/include/llvm/Analysis/ScalarEvolution.h b/include/llvm/Analysis/ScalarEvolution.h
index 41725be1ca3..62bd5dc1a1c 100644
--- a/include/llvm/Analysis/ScalarEvolution.h
+++ b/include/llvm/Analysis/ScalarEvolution.h
@@ -394,24 +394,24 @@ namespace llvm {
     SCEVHandle getZeroExtendExpr(const SCEVHandle &Op, const Type *Ty);
     SCEVHandle getSignExtendExpr(const SCEVHandle &Op, const Type *Ty);
     SCEVHandle getAnyExtendExpr(const SCEVHandle &Op, const Type *Ty);
-    SCEVHandle getAddExpr(std::vector<SCEVHandle> &Ops);
+    SCEVHandle getAddExpr(SmallVectorImpl<SCEVHandle> &Ops);
     SCEVHandle getAddExpr(const SCEVHandle &LHS, const SCEVHandle &RHS) {
-      std::vector<SCEVHandle> Ops;
+      SmallVector<SCEVHandle, 2> Ops;
       Ops.push_back(LHS);
       Ops.push_back(RHS);
       return getAddExpr(Ops);
     }
     SCEVHandle getAddExpr(const SCEVHandle &Op0, const SCEVHandle &Op1,
                           const SCEVHandle &Op2) {
-      std::vector<SCEVHandle> Ops;
+      SmallVector<SCEVHandle, 3> Ops;
       Ops.push_back(Op0);
       Ops.push_back(Op1);
       Ops.push_back(Op2);
       return getAddExpr(Ops);
     }
-    SCEVHandle getMulExpr(std::vector<SCEVHandle> &Ops);
+    SCEVHandle getMulExpr(SmallVectorImpl<SCEVHandle> &Ops);
     SCEVHandle getMulExpr(const SCEVHandle &LHS, const SCEVHandle &RHS) {
-      std::vector<SCEVHandle> Ops;
+      SmallVector<SCEVHandle, 2> Ops;
       Ops.push_back(LHS);
       Ops.push_back(RHS);
       return getMulExpr(Ops);
@@ -419,17 +419,17 @@ namespace llvm {
     SCEVHandle getUDivExpr(const SCEVHandle &LHS, const SCEVHandle &RHS);
     SCEVHandle getAddRecExpr(const SCEVHandle &Start, const SCEVHandle &Step,
                              const Loop *L);
-    SCEVHandle getAddRecExpr(std::vector<SCEVHandle> &Operands,
+    SCEVHandle getAddRecExpr(SmallVectorImpl<SCEVHandle> &Operands,
                              const Loop *L);
-    SCEVHandle getAddRecExpr(const std::vector<SCEVHandle> &Operands,
+    SCEVHandle getAddRecExpr(const SmallVectorImpl<SCEVHandle> &Operands,
                              const Loop *L) {
-      std::vector<SCEVHandle> NewOp(Operands);
+      SmallVector<SCEVHandle, 4> NewOp(Operands.begin(), Operands.end());
       return getAddRecExpr(NewOp, L);
     }
     SCEVHandle getSMaxExpr(const SCEVHandle &LHS, const SCEVHandle &RHS);
-    SCEVHandle getSMaxExpr(std::vector<SCEVHandle> Operands);
+    SCEVHandle getSMaxExpr(SmallVectorImpl<SCEVHandle> &Operands);
     SCEVHandle getUMaxExpr(const SCEVHandle &LHS, const SCEVHandle &RHS);
-    SCEVHandle getUMaxExpr(std::vector<SCEVHandle> Operands);
+    SCEVHandle getUMaxExpr(SmallVectorImpl<SCEVHandle> &Operands);
     SCEVHandle getUnknown(Value *V);
     SCEVHandle getCouldNotCompute();
diff --git a/include/llvm/Analysis/ScalarEvolutionExpressions.h b/include/llvm/Analysis/ScalarEvolutionExpressions.h
index 1978055b1af..ced220aa21c 100644
--- a/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -199,10 +199,10 @@ namespace llvm {
   ///
   class SCEVNAryExpr : public SCEV {
   protected:
-    std::vector<SCEVHandle> Operands;
+    SmallVector<SCEVHandle, 8> Operands;

-    SCEVNAryExpr(enum SCEVTypes T, const std::vector<SCEVHandle> &ops)
-      : SCEV(T), Operands(ops) {}
+    SCEVNAryExpr(enum SCEVTypes T, const SmallVectorImpl<SCEVHandle> &ops)
+      : SCEV(T), Operands(ops.begin(), ops.end()) {}
     virtual ~SCEVNAryExpr() {}

   public:
@@ -212,8 +212,8 @@ namespace llvm {
       return Operands[i];
     }

-    const std::vector<SCEVHandle> &getOperands() const { return Operands; }
-    typedef std::vector<SCEVHandle>::const_iterator op_iterator;
+    const SmallVectorImpl<SCEVHandle> &getOperands() const { return Operands; }
+    typedef SmallVectorImpl<SCEVHandle>::const_iterator op_iterator;
     op_iterator op_begin() const { return Operands.begin(); }
     op_iterator op_end() const { return Operands.end(); }

@@ -259,7 +259,7 @@ namespace llvm {
   ///
   class SCEVCommutativeExpr : public SCEVNAryExpr {
   protected:
-    SCEVCommutativeExpr(enum SCEVTypes T, const std::vector<SCEVHandle> &ops)
+    SCEVCommutativeExpr(enum SCEVTypes T, const SmallVectorImpl<SCEVHandle> &ops)
       : SCEVNAryExpr(T, ops) {}
     ~SCEVCommutativeExpr();

@@ -289,7 +289,7 @@ namespace llvm {
   class SCEVAddExpr : public SCEVCommutativeExpr {
     friend class ScalarEvolution;

-    explicit SCEVAddExpr(const std::vector<SCEVHandle> &ops)
+    explicit SCEVAddExpr(const SmallVectorImpl<SCEVHandle> &ops)
       : SCEVCommutativeExpr(scAddExpr, ops) {
    }

@@ -309,7 +309,7 @@ namespace llvm {
   class SCEVMulExpr : public SCEVCommutativeExpr {
     friend class ScalarEvolution;

-    explicit SCEVMulExpr(const std::vector<SCEVHandle> &ops)
+    explicit SCEVMulExpr(const SmallVectorImpl<SCEVHandle> &ops)
       : SCEVCommutativeExpr(scMulExpr, ops) {
    }

@@ -387,7 +387,7 @@ namespace llvm {
     const Loop *L;

-    SCEVAddRecExpr(const std::vector<SCEVHandle> &ops, const Loop *l)
+    SCEVAddRecExpr(const SmallVectorImpl<SCEVHandle> &ops, const Loop *l)
       : SCEVNAryExpr(scAddRecExpr, ops), L(l) {
       for (size_t i = 0, e = Operands.size(); i != e; ++i)
         assert(Operands[i]->isLoopInvariant(l) &&
@@ -404,7 +404,7 @@ namespace llvm {
     /// of degree N, it returns a chrec of degree N-1.
     SCEVHandle getStepRecurrence(ScalarEvolution &SE) const {
       if (isAffine()) return getOperand(1);
-      return SE.getAddRecExpr(std::vector<SCEVHandle>(op_begin()+1,op_end()),
+      return SE.getAddRecExpr(SmallVector<SCEVHandle, 3>(op_begin()+1,op_end()),
                               getLoop());
     }

@@ -463,7 +463,7 @@ namespace llvm {
   class SCEVSMaxExpr : public SCEVCommutativeExpr {
     friend class ScalarEvolution;

-    explicit SCEVSMaxExpr(const std::vector<SCEVHandle> &ops)
+    explicit SCEVSMaxExpr(const SmallVectorImpl<SCEVHandle> &ops)
       : SCEVCommutativeExpr(scSMaxExpr, ops) {
    }

@@ -484,7 +484,7 @@ namespace llvm {
   class SCEVUMaxExpr : public SCEVCommutativeExpr {
     friend class ScalarEvolution;

-    explicit SCEVUMaxExpr(const std::vector<SCEVHandle> &ops)
+    explicit SCEVUMaxExpr(const SmallVectorImpl<SCEVHandle> &ops)
       : SCEVCommutativeExpr(scUMaxExpr, ops) {
    }

diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp
index 98ab6f484ea..8357ddbc344 100644
--- a/lib/Analysis/ScalarEvolution.cpp
+++ b/lib/Analysis/ScalarEvolution.cpp
@@ -293,7 +293,7 @@ replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym,
     SCEVHandle H =
       getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
     if (H != getOperand(i)) {
-      std::vector<SCEVHandle> NewOps;
+      SmallVector<SCEVHandle, 8> NewOps;
       NewOps.reserve(getNumOperands());
       for (unsigned j = 0; j != i; ++j)
         NewOps.push_back(getOperand(j));
@@ -373,7 +373,7 @@ replaceSymbolicValuesWithConcrete(const SCEVHandle &Sym,
     SCEVHandle H =
       getOperand(i)->replaceSymbolicValuesWithConcrete(Sym, Conc, SE);
     if (H != getOperand(i)) {
-      std::vector<SCEVHandle> NewOps;
+      SmallVector<SCEVHandle, 8> NewOps;
       NewOps.reserve(getNumOperands());
       for (unsigned j = 0; j != i; ++j)
         NewOps.push_back(getOperand(j));
@@ -558,7 +558,7 @@ namespace {
 /// this to depend on where the addresses of various SCEV objects happened to
 /// land in memory.
 ///
-static void GroupByComplexity(std::vector<SCEVHandle> &Ops,
+static void GroupByComplexity(SmallVectorImpl<SCEVHandle> &Ops,
                               LoopInfo *LI) {
   if (Ops.size() < 2) return;  // Noop
   if (Ops.size() == 2) {
@@ -766,7 +766,7 @@ SCEVHandle ScalarEvolution::getTruncateExpr(const SCEVHandle &Op,

   // If the input value is a chrec scev made out of constants, truncate
   // all of the constants.
   if (const SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(Op)) {
-    std::vector<SCEVHandle> Operands;
+    SmallVector<SCEVHandle, 4> Operands;
     for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i)
       Operands.push_back(getTruncateExpr(AddRec->getOperand(i), Ty));
     return getAddRecExpr(Operands, AddRec->getLoop());
@@ -981,7 +981,7 @@ SCEVHandle ScalarEvolution::getAnyExtendExpr(const SCEVHandle &Op,

 /// getAddExpr - Get a canonical add expression, or something simpler if
 /// possible.
-SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {
+SCEVHandle ScalarEvolution::getAddExpr(SmallVectorImpl<SCEVHandle> &Ops) {
   assert(!Ops.empty() && "Cannot get empty add!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
@@ -1001,9 +1001,8 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {
     assert(Idx < Ops.size());
     while (const SCEVConstant *RHSC = dyn_cast<SCEVConstant>(Ops[Idx])) {
       // We found two constants, fold them together!
-      ConstantInt *Fold = ConstantInt::get(LHSC->getValue()->getValue() +
-                                           RHSC->getValue()->getValue());
-      Ops[0] = getConstant(Fold);
+      Ops[0] = getConstant(LHSC->getValue()->getValue() +
+                           RHSC->getValue()->getValue());
       Ops.erase(Ops.begin()+1);  // Erase the folded element
       if (Ops.size() == 1) return Ops[0];
       LHSC = cast<SCEVConstant>(Ops[0]);
@@ -1043,7 +1042,7 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {
     const SCEVTruncateExpr *Trunc = cast<SCEVTruncateExpr>(Ops[Idx]);
     const Type *DstType = Trunc->getType();
     const Type *SrcType = Trunc->getOperand()->getType();
-    std::vector<SCEVHandle> LargeOps;
+    SmallVector<SCEVHandle, 8> LargeOps;
     bool Ok = true;
     // Check all the operands to see if they can be represented in the
     // source type of the truncate.
@@ -1059,7 +1058,7 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {
         // is much more likely to be foldable here.
         LargeOps.push_back(getSignExtendExpr(C, SrcType));
       } else if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(Ops[i])) {
-        std::vector<SCEVHandle> LargeMulOps;
+        SmallVector<SCEVHandle, 8> LargeMulOps;
         for (unsigned j = 0, f = M->getNumOperands(); j != f && Ok; ++j) {
           if (const SCEVTruncateExpr *T =
                 dyn_cast<SCEVTruncateExpr>(M->getOperand(j))) {
@@ -1128,13 +1127,13 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {
     for (unsigned MulOp = 0, e = Mul->getNumOperands(); MulOp != e; ++MulOp) {
       const SCEV *MulOpSCEV = Mul->getOperand(MulOp);
       for (unsigned AddOp = 0, e = Ops.size(); AddOp != e; ++AddOp)
-        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(MulOpSCEV)) {
+        if (MulOpSCEV == Ops[AddOp] && !isa<SCEVConstant>(Ops[AddOp])) {
           // Fold W + X + (X * Y * Z)  -->  W + (X * ((Y*Z)+1))
           SCEVHandle InnerMul = Mul->getOperand(MulOp == 0);
           if (Mul->getNumOperands() != 2) {
             // If the multiply has more than two operands, we must get the
             // Y*Z term.
-            std::vector<SCEVHandle> MulOps(Mul->op_begin(), Mul->op_end());
+            SmallVector<SCEVHandle, 4> MulOps(Mul->op_begin(), Mul->op_end());
             MulOps.erase(MulOps.begin()+MulOp);
             InnerMul = getMulExpr(MulOps);
           }
@@ -1166,13 +1165,13 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {
             // Fold X + (A*B*C) + (A*D*E) --> X + (A*(B*C+D*E))
             SCEVHandle InnerMul1 = Mul->getOperand(MulOp == 0);
             if (Mul->getNumOperands() != 2) {
-              std::vector<SCEVHandle> MulOps(Mul->op_begin(), Mul->op_end());
+              SmallVector<SCEVHandle, 4> MulOps(Mul->op_begin(), Mul->op_end());
               MulOps.erase(MulOps.begin()+MulOp);
               InnerMul1 = getMulExpr(MulOps);
             }
             SCEVHandle InnerMul2 = OtherMul->getOperand(OMulOp == 0);
             if (OtherMul->getNumOperands() != 2) {
-              std::vector<SCEVHandle> MulOps(OtherMul->op_begin(),
-                                             OtherMul->op_end());
+              SmallVector<SCEVHandle, 4> MulOps(OtherMul->op_begin(),
+                                                OtherMul->op_end());
               MulOps.erase(MulOps.begin()+OMulOp);
               InnerMul2 = getMulExpr(MulOps);
             }
@@ -1199,7 +1198,7 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
     // Scan all of the other operands to this add and add them to the vector if
     // they are loop invariant w.r.t. the recurrence.
-    std::vector<SCEVHandle> LIOps;
+    SmallVector<SCEVHandle, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
       if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
@@ -1213,7 +1212,8 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {

       // NLI + LI + {Start,+,Step}  -->  NLI + {LI+Start,+,Step}
       LIOps.push_back(AddRec->getStart());

-      std::vector<SCEVHandle> AddRecOps(AddRec->op_begin(), AddRec->op_end());
+      SmallVector<SCEVHandle, 4> AddRecOps(AddRec->op_begin(),
+                                           AddRec->op_end());
       AddRecOps[0] = getAddExpr(LIOps);

       SCEVHandle NewRec = getAddRecExpr(AddRecOps, AddRec->getLoop());
@@ -1238,7 +1238,7 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {
         const SCEVAddRecExpr *OtherAddRec = cast<SCEVAddRecExpr>(Ops[OtherIdx]);
         if (AddRec->getLoop() == OtherAddRec->getLoop()) {
           // Other + {A,+,B} + {C,+,D}  -->  Other + {A+C,+,B+D}
-          std::vector<SCEVHandle> NewOps(AddRec->op_begin(), AddRec->op_end());
+          SmallVector<SCEVHandle, 4> NewOps(AddRec->op_begin(), AddRec->op_end());
           for (unsigned i = 0, e = OtherAddRec->getNumOperands(); i != e; ++i) {
             if (i >= NewOps.size()) {
               NewOps.insert(NewOps.end(), OtherAddRec->op_begin()+i,
@@ -1274,7 +1274,7 @@ SCEVHandle ScalarEvolution::getAddExpr(std::vector<SCEVHandle> &Ops) {

 /// getMulExpr - Get a canonical multiply expression, or something simpler if
 /// possible.
-SCEVHandle ScalarEvolution::getMulExpr(std::vector<SCEVHandle> &Ops) {
+SCEVHandle ScalarEvolution::getMulExpr(SmallVectorImpl<SCEVHandle> &Ops) {
   assert(!Ops.empty() && "Cannot get empty mul!");
 #ifndef NDEBUG
   for (unsigned i = 1, e = Ops.size(); i != e; ++i)
@@ -1355,7 +1355,7 @@ SCEVHandle ScalarEvolution::getMulExpr(std::vector<SCEVHandle> &Ops) {
   for (; Idx < Ops.size() && isa<SCEVAddRecExpr>(Ops[Idx]); ++Idx) {
     // Scan all of the other operands to this mul and add them to the vector if
     // they are loop invariant w.r.t. the recurrence.
-    std::vector<SCEVHandle> LIOps;
+    SmallVector<SCEVHandle, 8> LIOps;
     const SCEVAddRecExpr *AddRec = cast<SCEVAddRecExpr>(Ops[Idx]);
     for (unsigned i = 0, e = Ops.size(); i != e; ++i)
       if (Ops[i]->isLoopInvariant(AddRec->getLoop())) {
@@ -1367,7 +1367,7 @@ SCEVHandle ScalarEvolution::getMulExpr(std::vector<SCEVHandle> &Ops) {

     // If we found some loop invariants, fold them into the recurrence.
     if (!LIOps.empty()) {
       //  NLI * LI * {Start,+,Step}  -->  NLI * {LI*Start,+,LI*Step}
-      std::vector<SCEVHandle> NewOps;
+      SmallVector<SCEVHandle, 4> NewOps;
       NewOps.reserve(AddRec->getNumOperands());
       if (LIOps.size() == 1) {
         const SCEV *Scale = LIOps[0];
@@ -1375,7 +1375,7 @@ SCEVHandle ScalarEvolution::getMulExpr(std::vector<SCEVHandle> &Ops) {
           NewOps.push_back(getMulExpr(Scale, AddRec->getOperand(i)));
       } else {
         for (unsigned i = 0, e = AddRec->getNumOperands(); i != e; ++i) {
-          std::vector<SCEVHandle> MulOps(LIOps);
+          SmallVector<SCEVHandle, 4> MulOps(LIOps.begin(), LIOps.end());
           MulOps.push_back(AddRec->getOperand(i));
           NewOps.push_back(getMulExpr(MulOps));
         }
@@ -1473,14 +1473,14 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
               getAddRecExpr(getZeroExtendExpr(AR->getStart(), ExtTy),
                             getZeroExtendExpr(Step, ExtTy),
                             AR->getLoop())) {
-          std::vector<SCEVHandle> Operands;
+          SmallVector<SCEVHandle, 4> Operands;
           for (unsigned i = 0, e = AR->getNumOperands(); i != e; ++i)
             Operands.push_back(getUDivExpr(AR->getOperand(i), RHS));
           return getAddRecExpr(Operands, AR->getLoop());
         }
       // (A*B)/C --> A*(B/C) if safe and B/C can be folded.
       if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(LHS)) {
-        std::vector<SCEVHandle> Operands;
+        SmallVector<SCEVHandle, 4> Operands;
         for (unsigned i = 0, e = M->getNumOperands(); i != e; ++i)
           Operands.push_back(getZeroExtendExpr(M->getOperand(i), ExtTy));
         if (getZeroExtendExpr(M, ExtTy) == getMulExpr(Operands))
@@ -1489,7 +1489,9 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
             SCEVHandle Op = M->getOperand(i);
             SCEVHandle Div = getUDivExpr(Op, RHSC);
             if (!isa<SCEVUDivExpr>(Div) && getMulExpr(Div, RHSC) == Op) {
-              Operands = M->getOperands();
+              const SmallVectorImpl<SCEVHandle> &MOperands = M->getOperands();
+              Operands = SmallVector<SCEVHandle, 4>(MOperands.begin(),
+                                                    MOperands.end());
               Operands[i] = Div;
               return getMulExpr(Operands);
             }
@@ -1497,7 +1499,7 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
       }
       // (A+B)/C --> (A/C + B/C) if safe and A/C and B/C can be folded.
       if (const SCEVAddRecExpr *A = dyn_cast<SCEVAddRecExpr>(LHS)) {
-        std::vector<SCEVHandle> Operands;
+        SmallVector<SCEVHandle, 4> Operands;
         for (unsigned i = 0, e = A->getNumOperands(); i != e; ++i)
           Operands.push_back(getZeroExtendExpr(A->getOperand(i), ExtTy));
         if (getZeroExtendExpr(A, ExtTy) == getAddExpr(Operands)) {
@@ -1531,7 +1533,7 @@ SCEVHandle ScalarEvolution::getUDivExpr(const SCEVHandle &LHS,
 /// Simplify the expression as much as possible.
 SCEVHandle ScalarEvolution::getAddRecExpr(const SCEVHandle &Start,
                                const SCEVHandle &Step, const Loop *L) {
-  std::vector<SCEVHandle> Operands;
+  SmallVector<SCEVHandle, 4> Operands;
   Operands.push_back(Start);
   if (const SCEVAddRecExpr *StepChrec = dyn_cast<SCEVAddRecExpr>(Step))
     if (StepChrec->getLoop() == L) {
@@ -1546,7 +1548,7 @@ SCEVHandle ScalarEvolution::getAddRecExpr(const SCEVHandle &Start,

 /// getAddRecExpr - Get an add recurrence expression for the specified loop.
 /// Simplify the expression as much as possible.
-SCEVHandle ScalarEvolution::getAddRecExpr(std::vector<SCEVHandle> &Operands,
+SCEVHandle ScalarEvolution::getAddRecExpr(SmallVectorImpl<SCEVHandle> &Operands,
                                           const Loop *L) {
   if (Operands.size() == 1) return Operands[0];
 #ifndef NDEBUG
@@ -1565,8 +1567,8 @@ SCEVHandle ScalarEvolution::getAddRecExpr(std::vector<SCEVHandle> &Operands,
   if (const SCEVAddRecExpr *NestedAR = dyn_cast<SCEVAddRecExpr>(Operands[0])) {
     const Loop* NestedLoop = NestedAR->getLoop();
     if (L->getLoopDepth() < NestedLoop->getLoopDepth()) {
-      std::vector<SCEVHandle> NestedOperands(NestedAR->op_begin(),
-                                             NestedAR->op_end());
+      SmallVector<SCEVHandle, 4> NestedOperands(NestedAR->op_begin(),
+                                                NestedAR->op_end());
       SCEVHandle NestedARHandle(NestedAR);
       Operands[0] = NestedAR->getStart();
       NestedOperands[0] = getAddRecExpr(Operands, L);
@@ -1582,13 +1584,14 @@ SCEVHandle ScalarEvolution::getAddRecExpr(std::vector<SCEVHandle> &Operands,

 SCEVHandle ScalarEvolution::getSMaxExpr(const SCEVHandle &LHS,
                                         const SCEVHandle &RHS) {
-  std::vector<SCEVHandle> Ops;
+  SmallVector<SCEVHandle, 2> Ops;
   Ops.push_back(LHS);
   Ops.push_back(RHS);
   return getSMaxExpr(Ops);
 }

-SCEVHandle ScalarEvolution::getSMaxExpr(std::vector<SCEVHandle> Ops) {
+SCEVHandle
+ScalarEvolution::getSMaxExpr(SmallVectorImpl<SCEVHandle> &Ops) {
   assert(!Ops.empty() && "Cannot get empty smax!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
@@ -1668,13 +1671,14 @@ SCEVHandle ScalarEvolution::getSMaxExpr(std::vector<SCEVHandle> Ops) {

 SCEVHandle ScalarEvolution::getUMaxExpr(const SCEVHandle &LHS,
                                         const SCEVHandle &RHS) {
-  std::vector<SCEVHandle> Ops;
+  SmallVector<SCEVHandle, 2> Ops;
   Ops.push_back(LHS);
   Ops.push_back(RHS);
   return getUMaxExpr(Ops);
 }

-SCEVHandle ScalarEvolution::getUMaxExpr(std::vector<SCEVHandle> Ops) {
+SCEVHandle
+ScalarEvolution::getUMaxExpr(SmallVectorImpl<SCEVHandle> &Ops) {
   assert(!Ops.empty() && "Cannot get empty umax!");
   if (Ops.size() == 1) return Ops[0];
 #ifndef NDEBUG
@@ -2040,7 +2044,7 @@ SCEVHandle ScalarEvolution::createNodeForPHI(PHINode *PN) {

         if (FoundIndex != Add->getNumOperands()) {
           // Create an add with everything but the specified operand.
-          std::vector<SCEVHandle> Ops;
+          SmallVector<SCEVHandle, 8> Ops;
           for (unsigned i = 0, e = Add->getNumOperands(); i != e; ++i)
             if (i != FoundIndex)
               Ops.push_back(Add->getOperand(i));
@@ -3074,7 +3078,7 @@ SCEVHandle ScalarEvolution::getSCEVAtScope(const SCEV *V, const Loop *L) {
       if (OpAtScope != Comm->getOperand(i)) {
         // Okay, at least one of these operands is loop variant but might be
         // foldable.  Build a new instance of the folded commutative expression.
-        std::vector<SCEVHandle> NewOps(Comm->op_begin(), Comm->op_begin()+i);
+        SmallVector<SCEVHandle, 8> NewOps(Comm->op_begin(), Comm->op_begin()+i);
         NewOps.push_back(OpAtScope);

         for (++i; i != e; ++i) {
@@ -3611,7 +3615,7 @@ SCEVHandle SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
   // If the start is a non-zero constant, shift the range to simplify things.
   if (const SCEVConstant *SC = dyn_cast<SCEVConstant>(getStart()))
     if (!SC->getValue()->isZero()) {
-      std::vector<SCEVHandle> Operands(op_begin(), op_end());
+      SmallVector<SCEVHandle, 4> Operands(op_begin(), op_end());
       Operands[0] = SE.getIntegerSCEV(0, SC->getType());
       SCEVHandle Shifted = SE.getAddRecExpr(Operands, getLoop());
       if (const SCEVAddRecExpr *ShiftedAddRec =
@@ -3672,7 +3676,7 @@ SCEVHandle SCEVAddRecExpr::getNumIterationsInRange(ConstantRange Range,
   // quadratic equation to solve it.  To do this, we must frame our problem in
   // terms of figuring out when zero is crossed, instead of when
   // Range.getUpper() is crossed.
-  std::vector<SCEVHandle> NewOps(op_begin(), op_end());
+  SmallVector<SCEVHandle, 4> NewOps(op_begin(), op_end());
   NewOps[0] = SE.getNegativeSCEV(SE.getConstant(Range.getUpper()));
   SCEVHandle NewAddRec = SE.getAddRecExpr(NewOps, getLoop());

diff --git a/lib/Analysis/ScalarEvolutionExpander.cpp b/lib/Analysis/ScalarEvolutionExpander.cpp
index e1f8fa421f5..abfe94dc80b 100644
--- a/lib/Analysis/ScalarEvolutionExpander.cpp
+++ b/lib/Analysis/ScalarEvolutionExpander.cpp
@@ -182,7 +182,8 @@ static bool FactorOutConstant(SCEVHandle &S,
   if (const SCEVMulExpr *M = dyn_cast<SCEVMulExpr>(S))
     if (const SCEVConstant *C = dyn_cast<SCEVConstant>(M->getOperand(0)))
       if (!C->getValue()->getValue().srem(Factor)) {
-        std::vector<SCEVHandle> NewMulOps(M->getOperands());
+        const SmallVectorImpl<SCEVHandle> &MOperands = M->getOperands();
+        SmallVector<SCEVHandle, 4> NewMulOps(MOperands.begin(), MOperands.end());
         NewMulOps[0] =
           SE.getConstant(C->getValue()->getValue().sdiv(Factor));
         S = SE.getMulExpr(NewMulOps);
@@ -239,7 +240,7 @@ Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin,
                                     Value *V) {
   const Type *ElTy = PTy->getElementType();
   SmallVector<Value *, 8> GepIndices;
-  std::vector<SCEVHandle> Ops(op_begin, op_end);
+  SmallVector<SCEVHandle, 8> Ops(op_begin, op_end);
   bool AnyNonZeroIndices = false;

   // Decend down the pointer's type and attempt to convert the other
@@ -250,8 +251,8 @@ Value *SCEVExpander::expandAddToGEP(const SCEVHandle *op_begin,
   for (;;) {
     APInt ElSize = APInt(SE.getTypeSizeInBits(Ty),
                          ElTy->isSized() ?  SE.TD->getTypeAllocSize(ElTy) : 0);
-    std::vector<SCEVHandle> NewOps;
-    std::vector<SCEVHandle> ScaledOps;
+    SmallVector<SCEVHandle, 8> NewOps;
+    SmallVector<SCEVHandle, 8> ScaledOps;
     for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
       // Split AddRecs up into parts as either of the parts may be usable
       // without the other.
@@ -365,7 +366,7 @@ Value *SCEVExpander::visitAddExpr(const SCEVAddExpr *S) {
   // comments on expandAddToGEP for details.
   if (SE.TD)
     if (const PointerType *PTy = dyn_cast<PointerType>(V->getType())) {
-      const std::vector<SCEVHandle> &Ops = S->getOperands();
+      const SmallVectorImpl<SCEVHandle> &Ops = S->getOperands();
       return expandAddToGEP(&Ops[0], &Ops[Ops.size() - 1], PTy, Ty, V);
     }

@@ -432,7 +433,7 @@ static void ExposePointerBase(SCEVHandle &Base, SCEVHandle &Rest,
   }
   if (const SCEVAddExpr *A = dyn_cast<SCEVAddExpr>(Base)) {
     Base = A->getOperand(A->getNumOperands()-1);
-    std::vector<SCEVHandle> NewAddOps(A->op_begin(), A->op_end());
+    SmallVector<SCEVHandle, 8> NewAddOps(A->op_begin(), A->op_end());
     NewAddOps.back() = Rest;
     Rest = SE.getAddExpr(NewAddOps);
     ExposePointerBase(Base, Rest, SE);
@@ -473,7 +474,8 @@ Value *SCEVExpander::visitAddRecExpr(const SCEVAddRecExpr *S) {

   // {X,+,F} --> X + {0,+,F}
   if (!S->getStart()->isZero()) {
-    std::vector<SCEVHandle> NewOps(S->getOperands());
+    const SmallVectorImpl<SCEVHandle> &SOperands = S->getOperands();
+    SmallVector<SCEVHandle, 4> NewOps(SOperands.begin(), SOperands.end());
     NewOps[0] = SE.getIntegerSCEV(0, Ty);
     SCEVHandle Rest = SE.getAddRecExpr(NewOps, L);

diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 944f4093191..5603042617c 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -592,7 +592,7 @@ static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm,
   if (Val->isLoopInvariant(L)) return;  // Nothing to do.
   if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
-    std::vector<SCEVHandle> NewOps;
+    SmallVector<SCEVHandle, 4> NewOps;
     NewOps.reserve(SAE->getNumOperands());

     for (unsigned i = 0; i != SAE->getNumOperands(); ++i)
@@ -613,7 +613,7 @@ static void MoveLoopVariantsToImmediateField(SCEVHandle &Val, SCEVHandle &Imm,
     SCEVHandle Start = SARE->getStart();
     MoveLoopVariantsToImmediateField(Start, Imm, L, SE);

-    std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
+    SmallVector<SCEVHandle, 4> Ops(SARE->op_begin(), SARE->op_end());
     Ops[0] = Start;
     Val = SE->getAddRecExpr(Ops, SARE->getLoop());
   } else {
@@ -633,7 +633,7 @@ static void MoveImmediateValues(const TargetLowering *TLI,
                                 bool isAddress, Loop *L, ScalarEvolution *SE) {
   if (const SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
-    std::vector<SCEVHandle> NewOps;
+    SmallVector<SCEVHandle, 4> NewOps;
     NewOps.reserve(SAE->getNumOperands());

     for (unsigned i = 0; i != SAE->getNumOperands(); ++i) {
@@ -660,7 +660,7 @@ static void MoveImmediateValues(const TargetLowering *TLI,
       MoveImmediateValues(TLI, AccessTy, Start, Imm, isAddress, L, SE);

      if (Start != SARE->getStart()) {
-        std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
+        SmallVector<SCEVHandle, 4> Ops(SARE->op_begin(), SARE->op_end());
        Ops[0] = Start;
        Val = SE->getAddRecExpr(Ops, SARE->getLoop());
      }
@@ -717,7 +717,7 @@ static void MoveImmediateValues(const TargetLowering *TLI,
 /// SeparateSubExprs - Decompose Expr into all of the subexpressions that are
 /// added together.  This is used to reassociate common addition subexprs
 /// together for maximal sharing when rewriting bases.
-static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
+static void SeparateSubExprs(SmallVector<SCEVHandle, 16> &SubExprs,
                              SCEVHandle Expr,
                              ScalarEvolution *SE) {
   if (const SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(Expr)) {
@@ -729,7 +729,7 @@ static void SeparateSubExprs(std::vector<SCEVHandle> &SubExprs,
     SubExprs.push_back(Expr);
   } else {
     // Compute the addrec with zero as its base.
-    std::vector<SCEVHandle> Ops(SARE->op_begin(), SARE->op_end());
+    SmallVector<SCEVHandle, 4> Ops(SARE->op_begin(), SARE->op_end());
     Ops[0] = Zero;   // Start with zero base.
     SubExprs.push_back(SE->getAddRecExpr(Ops, SARE->getLoop()));

@@ -783,9 +783,9 @@ RemoveCommonExpressionsFromUseBases(std::vector<BasedUser> &Uses,

   // UniqueSubExprs - Keep track of all of the subexpressions we see in the
   // order we see them.
-  std::vector<SCEVHandle> UniqueSubExprs;
+  SmallVector<SCEVHandle, 16> UniqueSubExprs;

-  std::vector<SCEVHandle> SubExprs;
+  SmallVector<SCEVHandle, 16> SubExprs;
   unsigned NumUsesInsideLoop = 0;
   for (unsigned i = 0; i != NumUses; ++i) {
     // If the user is outside the loop, just ignore it for base computation.
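
The change applied throughout this patch is the usual LLVM SmallVector idiom: functions take SmallVectorImpl<T>& (the size-erased base class of SmallVector<T, N>), so each caller can choose its own inline capacity and short operand lists never touch the heap. Below is a minimal standalone sketch of that idiom, not part of the patch or of the ScalarEvolution API; the Expr type and the sumOf helpers are hypothetical stand-ins used only for illustration.

// sketch.cpp -- illustrative only; assumes an LLVM source tree for the header.
#include "llvm/ADT/SmallVector.h"

struct Expr { int Val; };   // hypothetical operand type

// Callee takes the size-erased base class, so it accepts any
// SmallVector<Expr, N> regardless of the inline capacity N.
static Expr sumOf(llvm::SmallVectorImpl<Expr> &Ops) {
  Expr Result = {0};
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    Result.Val += Ops[i].Val;
  return Result;
}

// Convenience overload mirroring the two-operand getAddExpr wrapper:
// a two-element inline buffer means no heap allocation in the common case.
static Expr sumOf(const Expr &LHS, const Expr &RHS) {
  llvm::SmallVector<Expr, 2> Ops;
  Ops.push_back(LHS);
  Ops.push_back(RHS);
  return sumOf(Ops);   // binds to the SmallVectorImpl<Expr>& overload
}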