//===- LoopStrengthReduce.cpp - Strength Reduce IVs in Loops --------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This transformation analyzes and transforms the induction variables (and
// computations derived from them) into forms suitable for efficient execution
// on the target.
//
// This pass performs a strength reduction on array references inside loops
// that have as one or more of their components the loop induction variable.
// It rewrites expressions to take advantage of scaled-index addressing modes
// available on the target, and it performs a variety of other optimizations
// related to loop induction variables.
//
// Terminology note: this code has a lot of handling for "post-increment" or
// "post-inc" users. This is not talking about post-increment addressing modes;
// it is instead talking about code like this:
//
//   %i = phi [ 0, %entry ], [ %i.next, %latch ]
//   ...
//   %i.next = add %i, 1
//   %c = icmp eq %i.next, %n
//
// The SCEV for %i is {0,+,1}<%L>. The SCEV for %i.next is {1,+,1}<%L>; however,
// it's useful to think about these as the same register, with some uses using
// the value of the register before the add and some using it after. In this
// example, the icmp is a post-increment user, since it uses %i.next, which is
// the value of the induction variable after the increment. The other common
// case of post-increment users is users outside the loop.
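//
// For example (illustrative, not from the original comment): a use of %i.next
// in a block after the loop exit also sees the incremented value, so it is
// likewise a post-increment user.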
//
// TODO: More sophistication in the way Formulae are generated and filtered.
//
// TODO: Handle multiple loops at a time.
//
// TODO: Should TargetLowering::AddrMode::BaseGV be changed to a ConstantExpr
//       instead of a GlobalValue?
//
// TODO: When truncation is free, truncate ICmp users' operands to make it a
//       smaller encoding (on x86 at least).
//
// TODO: When a negated register is used by an add (such as in a list of
//       multiple base registers, or as the increment expression in an addrec),
//       we may not actually need both reg and (-1 * reg) in registers; the
//       negation can be implemented by using a sub instead of an add. The
//       lack of support for taking this into consideration when making
//       register pressure decisions is partly worked around by the "Special"
//       use kind.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "loop-reduce"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Analysis/IVUsers.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/LoopPass.h"
#include "llvm/Analysis/ScalarEvolutionExpander.h"
#include "llvm/Assembly/Writer.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ValueHandle.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;
static cl::opt<bool> EnableNested(
  "enable-lsr-nested", cl::Hidden, cl::desc("Enable LSR on nested loops"));

static cl::opt<bool> EnableRetry(
  "enable-lsr-retry", cl::Hidden, cl::desc("Enable LSR retry"));

// Temporary flag to clean up congruent phis after LSR phi expansion.
// It's currently disabled until we can determine whether it's truly useful or
// not. The flag should be removed after the v3.0 release.
static cl::opt<bool> EnablePhiElim(
  "enable-lsr-phielim", cl::Hidden, cl::desc("Enable LSR phi elimination"));
namespace {

/// RegSortData - This class holds data which is used to order reuse candidates.
class RegSortData {
public:
  /// UsedByIndices - This represents the set of LSRUse indices which reference
  /// a particular register.
  SmallBitVector UsedByIndices;

  RegSortData() {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void RegSortData::print(raw_ostream &OS) const {
  OS << "[NumUses=" << UsedByIndices.count() << ']';
}

void RegSortData::dump() const {
  print(errs()); errs() << '\n';
}
namespace {

/// RegUseTracker - Map register candidates to information about how they are
/// used.
class RegUseTracker {
  typedef DenseMap<const SCEV *, RegSortData> RegUsesTy;

  RegUsesTy RegUsesMap;
  SmallVector<const SCEV *, 16> RegSequence;

public:
  void CountRegister(const SCEV *Reg, size_t LUIdx);
  void DropRegister(const SCEV *Reg, size_t LUIdx);
  void SwapAndDropUse(size_t LUIdx, size_t LastLUIdx);

  bool isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const;

  const SmallBitVector &getUsedByIndices(const SCEV *Reg) const;

  void clear();

  typedef SmallVectorImpl<const SCEV *>::iterator iterator;
  typedef SmallVectorImpl<const SCEV *>::const_iterator const_iterator;
  iterator begin() { return RegSequence.begin(); }
  iterator end()   { return RegSequence.end(); }
  const_iterator begin() const { return RegSequence.begin(); }
  const_iterator end() const { return RegSequence.end(); }
};

}
void
RegUseTracker::CountRegister(const SCEV *Reg, size_t LUIdx) {
  std::pair<RegUsesTy::iterator, bool> Pair =
    RegUsesMap.insert(std::make_pair(Reg, RegSortData()));
  RegSortData &RSD = Pair.first->second;
  if (Pair.second)
    RegSequence.push_back(Reg);
  RSD.UsedByIndices.resize(std::max(RSD.UsedByIndices.size(), LUIdx + 1));
  RSD.UsedByIndices.set(LUIdx);
}

void
RegUseTracker::DropRegister(const SCEV *Reg, size_t LUIdx) {
  RegUsesTy::iterator It = RegUsesMap.find(Reg);
  assert(It != RegUsesMap.end());
  RegSortData &RSD = It->second;
  assert(RSD.UsedByIndices.size() > LUIdx);
  RSD.UsedByIndices.reset(LUIdx);
}

void
RegUseTracker::SwapAndDropUse(size_t LUIdx, size_t LastLUIdx) {
  assert(LUIdx <= LastLUIdx);

  // Update RegUses. The data structure is not optimized for this purpose;
  // we must iterate through it and update each of the bit vectors.
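  //
  // For example (illustrative, not from the original source): with five uses,
  // SwapAndDropUse(2, 4) moves each register's bit for use 4 into slot 2 and
  // then truncates each bit vector to four entries, discarding the old slot 4.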
  for (RegUsesTy::iterator I = RegUsesMap.begin(), E = RegUsesMap.end();
       I != E; ++I) {
    SmallBitVector &UsedByIndices = I->second.UsedByIndices;
    if (LUIdx < UsedByIndices.size())
      UsedByIndices[LUIdx] =
        LastLUIdx < UsedByIndices.size() ? UsedByIndices[LastLUIdx] : 0;
    UsedByIndices.resize(std::min(UsedByIndices.size(), LastLUIdx));
  }
}

bool
RegUseTracker::isRegUsedByUsesOtherThan(const SCEV *Reg, size_t LUIdx) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  if (I == RegUsesMap.end())
    return false;
  const SmallBitVector &UsedByIndices = I->second.UsedByIndices;
  int i = UsedByIndices.find_first();
  if (i == -1) return false;
  if ((size_t)i != LUIdx) return true;
  return UsedByIndices.find_next(i) != -1;
}

const SmallBitVector &RegUseTracker::getUsedByIndices(const SCEV *Reg) const {
  RegUsesTy::const_iterator I = RegUsesMap.find(Reg);
  assert(I != RegUsesMap.end() && "Unknown register!");
  return I->second.UsedByIndices;
}

void RegUseTracker::clear() {
  RegUsesMap.clear();
  RegSequence.clear();
}

namespace {
/// Formula - This class holds information that describes a formula for
/// computing a value that satisfies a use. It may include broken-out
/// immediates and scaled registers.
struct Formula {
  /// AM - This is used to represent complex addressing, as well as other kinds
  /// of interesting uses.
  TargetLowering::AddrMode AM;

  /// BaseRegs - The list of "base" registers for this use. When this is
  /// non-empty, AM.HasBaseReg should be set to true.
  SmallVector<const SCEV *, 2> BaseRegs;

  /// ScaledReg - The 'scaled' register for this use. This should be non-null
  /// when AM.Scale is not zero.
  const SCEV *ScaledReg;

  /// UnfoldedOffset - An additional constant offset which is added near the
  /// use. This requires a temporary register, but the offset itself can
  /// live in an add immediate field rather than a register.
  int64_t UnfoldedOffset;
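
  // For example (illustrative, not from the original source): a formula might
  // print as "reg(%base) + 4*reg({0,+,1}<%L>) + imm(8)", i.e. one base
  // register, one scaled register, and an unfolded immediate.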

  Formula() : ScaledReg(0), UnfoldedOffset(0) {}

  void InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE);

  unsigned getNumRegs() const;
  Type *getType() const;

  void DeleteBaseReg(const SCEV *&S);

  bool referencesReg(const SCEV *S) const;
  bool hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                  const RegUseTracker &RegUses) const;

  void print(raw_ostream &OS) const;
  void dump() const;
};

}
/// DoInitialMatch - Recursion helper for InitialMatch.
static void DoInitialMatch(const SCEV *S, Loop *L,
                           SmallVectorImpl<const SCEV *> &Good,
                           SmallVectorImpl<const SCEV *> &Bad,
                           ScalarEvolution &SE) {
  // Collect expressions which properly dominate the loop header.
  if (SE.properlyDominates(S, L->getHeader())) {
    Good.push_back(S);
    return;
  }

  // Look at add operands.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      DoInitialMatch(*I, L, Good, Bad, SE);
    return;
  }

  // Look at addrec operands.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S))
    if (!AR->getStart()->isZero()) {
      DoInitialMatch(AR->getStart(), L, Good, Bad, SE);
      DoInitialMatch(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                      AR->getStepRecurrence(SE),
                                      // FIXME: AR->getNoWrapFlags()
                                      AR->getLoop(), SCEV::FlagAnyWrap),
                     L, Good, Bad, SE);
      return;
    }

  // Handle a multiplication by -1 (negation) if it didn't fold.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S))
    if (Mul->getOperand(0)->isAllOnesValue()) {
      SmallVector<const SCEV *, 4> Ops(Mul->op_begin()+1, Mul->op_end());
      const SCEV *NewMul = SE.getMulExpr(Ops);

      SmallVector<const SCEV *, 4> MyGood;
      SmallVector<const SCEV *, 4> MyBad;
      DoInitialMatch(NewMul, L, MyGood, MyBad, SE);
      const SCEV *NegOne = SE.getSCEV(ConstantInt::getAllOnesValue(
        SE.getEffectiveSCEVType(NewMul->getType())));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyGood.begin(),
           E = MyGood.end(); I != E; ++I)
        Good.push_back(SE.getMulExpr(NegOne, *I));
      for (SmallVectorImpl<const SCEV *>::const_iterator I = MyBad.begin(),
           E = MyBad.end(); I != E; ++I)
        Bad.push_back(SE.getMulExpr(NegOne, *I));
      return;
    }

  // Ok, we can't do anything interesting. Just stuff the whole thing into a
  // register and hope for the best.
  Bad.push_back(S);
}
/// InitialMatch - Incorporate loop-variant parts of S into this Formula,
/// attempting to keep all loop-invariant and loop-computable values in a
/// single base register.
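///
/// For example (illustrative, not from the original comment): matching
/// {(%x + 4),+,2}<%L> puts the loop-invariant part (%x + 4) in one base
/// register and the loop-variant part {0,+,2}<%L> in another.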
void Formula::InitialMatch(const SCEV *S, Loop *L, ScalarEvolution &SE) {
  SmallVector<const SCEV *, 4> Good;
  SmallVector<const SCEV *, 4> Bad;
  DoInitialMatch(S, L, Good, Bad, SE);
  if (!Good.empty()) {
    const SCEV *Sum = SE.getAddExpr(Good);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
  if (!Bad.empty()) {
    const SCEV *Sum = SE.getAddExpr(Bad);
    if (!Sum->isZero())
      BaseRegs.push_back(Sum);
    AM.HasBaseReg = true;
  }
}
/// getNumRegs - Return the total number of register operands used by this
/// formula. This does not include register uses implied by non-constant
/// addrec strides.
unsigned Formula::getNumRegs() const {
  return !!ScaledReg + BaseRegs.size();
}

/// getType - Return the type of this formula, if it has one, or null
/// otherwise. This type is meaningless except for the bit size.
Type *Formula::getType() const {
  return !BaseRegs.empty() ? BaseRegs.front()->getType() :
         ScaledReg ? ScaledReg->getType() :
         AM.BaseGV ? AM.BaseGV->getType() :
         0;
}

/// DeleteBaseReg - Delete the given base reg from the BaseRegs list.
void Formula::DeleteBaseReg(const SCEV *&S) {
  if (&S != &BaseRegs.back())
    std::swap(S, BaseRegs.back());
  BaseRegs.pop_back();
}

/// referencesReg - Test if this formula references the given register.
bool Formula::referencesReg(const SCEV *S) const {
  return S == ScaledReg ||
         std::find(BaseRegs.begin(), BaseRegs.end(), S) != BaseRegs.end();
}

/// hasRegsUsedByUsesOtherThan - Test whether this formula uses registers
/// which are used by uses other than the use with the given index.
bool Formula::hasRegsUsedByUsesOtherThan(size_t LUIdx,
                                         const RegUseTracker &RegUses) const {
  if (ScaledReg)
    if (RegUses.isRegUsedByUsesOtherThan(ScaledReg, LUIdx))
      return true;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I)
    if (RegUses.isRegUsedByUsesOtherThan(*I, LUIdx))
      return true;
  return false;
}

void Formula::print(raw_ostream &OS) const {
  bool First = true;
  if (AM.BaseGV) {
    if (!First) OS << " + "; else First = false;
    WriteAsOperand(OS, AM.BaseGV, /*PrintType=*/false);
  }
  if (AM.BaseOffs != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.BaseOffs;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = BaseRegs.begin(),
       E = BaseRegs.end(); I != E; ++I) {
    if (!First) OS << " + "; else First = false;
    OS << "reg(" << **I << ')';
  }
  if (AM.HasBaseReg && BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: HasBaseReg**";
  } else if (!AM.HasBaseReg && !BaseRegs.empty()) {
    if (!First) OS << " + "; else First = false;
    OS << "**error: !HasBaseReg**";
  }
  if (AM.Scale != 0) {
    if (!First) OS << " + "; else First = false;
    OS << AM.Scale << "*reg(";
    if (ScaledReg)
      OS << *ScaledReg;
    else
      OS << "<unknown>";
    OS << ')';
  }
  if (UnfoldedOffset != 0) {
    if (!First) OS << " + "; else First = false;
    OS << "imm(" << UnfoldedOffset << ')';
  }
}

void Formula::dump() const {
  print(errs()); errs() << '\n';
}
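
// Note (illustrative commentary, not from the original source): the three
// helpers below share one trick. Each sign-extends to a type wide enough that
// the operation cannot wrap (one extra bit for add/addrec, the operand width
// times the operand count for mul) and then checks whether ScalarEvolution
// pushed the extension into the operands; if it did, the original operation
// provably does not overflow its type.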
/// isAddRecSExtable - Return true if the given addrec can be sign-extended
/// without changing its value.
static bool isAddRecSExtable(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(AR->getType()) + 1);
  return isa<SCEVAddRecExpr>(SE.getSignExtendExpr(AR, WideTy));
}

/// isAddSExtable - Return true if the given add can be sign-extended
/// without changing its value.
static bool isAddSExtable(const SCEVAddExpr *A, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(), SE.getTypeSizeInBits(A->getType()) + 1);
  return isa<SCEVAddExpr>(SE.getSignExtendExpr(A, WideTy));
}

/// isMulSExtable - Return true if the given mul can be sign-extended
/// without changing its value.
static bool isMulSExtable(const SCEVMulExpr *M, ScalarEvolution &SE) {
  Type *WideTy =
    IntegerType::get(SE.getContext(),
                     SE.getTypeSizeInBits(M->getType()) * M->getNumOperands());
  return isa<SCEVMulExpr>(SE.getSignExtendExpr(M, WideTy));
}
/// getExactSDiv - Return an expression for LHS /s RHS, if it can be determined
/// and if the remainder is known to be zero, or null otherwise. If
/// IgnoreSignificantBits is true, expressions like (X * Y) /s Y are simplified
/// to Y, ignoring that the multiplication may overflow, which is useful when
/// the result will be used in a context where the most significant bits are
/// ignored.
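///
/// For example (illustrative, not from the original comment): with
/// IgnoreSignificantBits set, getExactSDiv((8 + 4*%x), 4, SE) yields
/// (2 + %x), while getExactSDiv(%x, 3, SE) returns null because the
/// quotient is unknown.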
static const SCEV *getExactSDiv(const SCEV *LHS, const SCEV *RHS,
                                ScalarEvolution &SE,
                                bool IgnoreSignificantBits = false) {
  // Handle the trivial case, which works for any SCEV type.
  if (LHS == RHS)
    return SE.getConstant(LHS->getType(), 1);

  // Handle a few RHS special cases.
  const SCEVConstant *RC = dyn_cast<SCEVConstant>(RHS);
  if (RC) {
    const APInt &RA = RC->getValue()->getValue();
    // Handle x /s -1 as x * -1, to give ScalarEvolution a chance to do
    // some folding.
    if (RA.isAllOnesValue())
      return SE.getMulExpr(LHS, RC);
    // Handle x /s 1 as x.
    if (RA == 1)
      return LHS;
  }

  // Check for a division of a constant by a constant.
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(LHS)) {
    if (!RC)
      return 0;
    const APInt &LA = C->getValue()->getValue();
    const APInt &RA = RC->getValue()->getValue();
    if (LA.srem(RA) != 0)
      return 0;
    return SE.getConstant(LA.sdiv(RA));
  }

  // Distribute the sdiv over addrec operands, if the addrec doesn't overflow.
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddRecSExtable(AR, SE)) {
      const SCEV *Step = getExactSDiv(AR->getStepRecurrence(SE), RHS, SE,
                                      IgnoreSignificantBits);
      if (!Step) return 0;
      const SCEV *Start = getExactSDiv(AR->getStart(), RHS, SE,
                                       IgnoreSignificantBits);
      if (!Start) return 0;
      // FlagNW is independent of the start value, step direction, and is
      // preserved with smaller magnitude steps.
      // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
      return SE.getAddRecExpr(Start, Step, AR->getLoop(), SCEV::FlagAnyWrap);
    }
    return 0;
  }

  // Distribute the sdiv over add operands, if the add doesn't overflow.
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(LHS)) {
    if (IgnoreSignificantBits || isAddSExtable(Add, SE)) {
      SmallVector<const SCEV *, 8> Ops;
      for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
           I != E; ++I) {
        const SCEV *Op = getExactSDiv(*I, RHS, SE,
                                      IgnoreSignificantBits);
        if (!Op) return 0;
        Ops.push_back(Op);
      }
      return SE.getAddExpr(Ops);
    }
    return 0;
  }

  // Check for a multiply operand that we can pull RHS out of.
  if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(LHS)) {
    if (IgnoreSignificantBits || isMulSExtable(Mul, SE)) {
      SmallVector<const SCEV *, 4> Ops;
      bool Found = false;
      for (SCEVMulExpr::op_iterator I = Mul->op_begin(), E = Mul->op_end();
           I != E; ++I) {
        const SCEV *S = *I;
        if (!Found)
          if (const SCEV *Q = getExactSDiv(S, RHS, SE,
                                           IgnoreSignificantBits)) {
            S = Q;
            Found = true;
          }
        Ops.push_back(S);
      }
      return Found ? SE.getMulExpr(Ops) : 0;
    }
    return 0;
  }

  // Otherwise we don't know.
  return 0;
}
/// ExtractImmediate - If S involves the addition of a constant integer value,
/// return that integer value, and mutate S to point to a new SCEV with that
/// value excluded.
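///
/// For example (illustrative, not from the original comment): given
/// S = (4 + %x), this returns 4 and leaves S pointing at (%x); given
/// S = {(4 + %x),+,8}<%L>, it returns 4 and leaves S as {%x,+,8}<%L>.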
static int64_t ExtractImmediate(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVConstant *C = dyn_cast<SCEVConstant>(S)) {
    if (C->getValue()->getValue().getMinSignedBits() <= 64) {
      S = SE.getConstant(C->getType(), 0);
      return C->getValue()->getSExtValue();
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    int64_t Result = ExtractImmediate(NewOps.front(), SE);
    if (Result != 0)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}
/// ExtractSymbol - If S involves the addition of a GlobalValue address,
/// return that symbol, and mutate S to point to a new SCEV with that
/// value excluded.
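///
/// For example (illustrative, not from the original comment): given
/// S = (%x + @gv), this returns @gv and leaves S pointing at (%x).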
static GlobalValue *ExtractSymbol(const SCEV *&S, ScalarEvolution &SE) {
  if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
    if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue())) {
      S = SE.getConstant(GV->getType(), 0);
      return GV;
    }
  } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(Add->op_begin(), Add->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.back(), SE);
    if (Result)
      S = SE.getAddExpr(NewOps);
    return Result;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    SmallVector<const SCEV *, 8> NewOps(AR->op_begin(), AR->op_end());
    GlobalValue *Result = ExtractSymbol(NewOps.front(), SE);
    if (Result)
      S = SE.getAddRecExpr(NewOps, AR->getLoop(),
                           // FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                           SCEV::FlagAnyWrap);
    return Result;
  }
  return 0;
}
/// isAddressUse - Returns true if the specified instruction is using the
/// specified value as an address.
static bool isAddressUse(Instruction *Inst, Value *OperandVal) {
  bool isAddress = isa<LoadInst>(Inst);
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    if (SI->getOperand(1) == OperandVal)
      isAddress = true;
  } else if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::prefetch:
      case Intrinsic::x86_sse_storeu_ps:
      case Intrinsic::x86_sse2_storeu_pd:
      case Intrinsic::x86_sse2_storeu_dq:
      case Intrinsic::x86_sse2_storel_dq:
        if (II->getArgOperand(0) == OperandVal)
          isAddress = true;
        break;
    }
  }
  return isAddress;
}
/// getAccessType - Return the type of the memory being accessed.
static Type *getAccessType(const Instruction *Inst) {
  Type *AccessTy = Inst->getType();
  if (const StoreInst *SI = dyn_cast<StoreInst>(Inst))
    AccessTy = SI->getOperand(0)->getType();
  else if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    // Addressing modes can also be folded into prefetches and a variety
    // of intrinsics.
    switch (II->getIntrinsicID()) {
    default: break;
    case Intrinsic::x86_sse_storeu_ps:
    case Intrinsic::x86_sse2_storeu_pd:
    case Intrinsic::x86_sse2_storeu_dq:
    case Intrinsic::x86_sse2_storel_dq:
      AccessTy = II->getArgOperand(0)->getType();
      break;
    }
  }

  // All pointers have the same requirements, so canonicalize them to an
  // arbitrary pointer type to minimize variation.
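  //
  // For example (illustrative, not from the original comment): an i32* access
  // type and a float* access type in the same address space both become i1*
  // in that address space, so the two uses can be treated uniformly.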
  if (PointerType *PTy = dyn_cast<PointerType>(AccessTy))
    AccessTy = PointerType::get(IntegerType::get(PTy->getContext(), 1),
                                PTy->getAddressSpace());

  return AccessTy;
}
/// isExistingPhi - Return true if this AddRec is already a phi in its loop.
static bool isExistingPhi(const SCEVAddRecExpr *AR, ScalarEvolution &SE) {
  for (BasicBlock::iterator I = AR->getLoop()->getHeader()->begin();
       PHINode *PN = dyn_cast<PHINode>(I); ++I) {
    if (SE.isSCEVable(PN->getType()) &&
        (SE.getEffectiveSCEVType(PN->getType()) ==
         SE.getEffectiveSCEVType(AR->getType())) &&
        SE.getSCEV(PN) == AR)
      return true;
  }
  return false;
}
/// DeleteTriviallyDeadInstructions - If any of the instructions in the
/// specified set are trivially dead, delete them and see if this makes any of
/// their operands subsequently dead.
static bool
DeleteTriviallyDeadInstructions(SmallVectorImpl<WeakVH> &DeadInsts) {
  bool Changed = false;

  while (!DeadInsts.empty()) {
    Instruction *I = dyn_cast_or_null<Instruction>(&*DeadInsts.pop_back_val());

    if (I == 0 || !isInstructionTriviallyDead(I))
      continue;

    for (User::op_iterator OI = I->op_begin(), E = I->op_end(); OI != E; ++OI)
      if (Instruction *U = dyn_cast<Instruction>(*OI)) {
        *OI = 0;
        if (U->use_empty())
          DeadInsts.push_back(U);
      }

    I->eraseFromParent();
    Changed = true;
  }

  return Changed;
}

namespace {
/// Cost - This class is used to measure and compare candidate formulae.
class Cost {
  /// TODO: Some of these could be merged. Also, a lexical ordering
  /// isn't always optimal.
  unsigned NumRegs;
  unsigned AddRecCost;
  unsigned NumIVMuls;
  unsigned NumBaseAdds;
  unsigned ImmCost;
  unsigned SetupCost;

public:
  Cost()
    : NumRegs(0), AddRecCost(0), NumIVMuls(0), NumBaseAdds(0), ImmCost(0),
      SetupCost(0) {}

  bool operator<(const Cost &Other) const;

  void Loose();

#ifndef NDEBUG
  // Once any of the metrics loses, they must all remain losers.
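  // (Illustrative note, not from the original source: Loose() sets every
  // field to ~0u, so a valid cost either has no field equal to ~0u or has
  // all fields equal to ~0u; isValid() checks exactly that invariant.)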
  bool isValid() {
    return ((NumRegs | AddRecCost | NumIVMuls | NumBaseAdds
             | ImmCost | SetupCost) != ~0u)
      || ((NumRegs & AddRecCost & NumIVMuls & NumBaseAdds
           & ImmCost & SetupCost) == ~0u);
  }
#endif

  bool isLoser() {
    assert(isValid() && "invalid cost");
    return NumRegs == ~0u;
  }

  void RateFormula(const Formula &F,
                   SmallPtrSet<const SCEV *, 16> &Regs,
                   const DenseSet<const SCEV *> &VisitedRegs,
                   const Loop *L,
                   const SmallVectorImpl<int64_t> &Offsets,
                   ScalarEvolution &SE, DominatorTree &DT,
                   SmallPtrSet<const SCEV *, 16> *LoserRegs = 0);

  void print(raw_ostream &OS) const;
  void dump() const;

private:
  void RateRegister(const SCEV *Reg,
                    SmallPtrSet<const SCEV *, 16> &Regs,
                    const Loop *L,
                    ScalarEvolution &SE, DominatorTree &DT);
  void RatePrimaryRegister(const SCEV *Reg,
                           SmallPtrSet<const SCEV *, 16> &Regs,
                           const Loop *L,
                           ScalarEvolution &SE, DominatorTree &DT,
                           SmallPtrSet<const SCEV *, 16> *LoserRegs);
};

}
/// RateRegister - Tally up interesting quantities from the given register.
void Cost::RateRegister(const SCEV *Reg,
                        SmallPtrSet<const SCEV *, 16> &Regs,
                        const Loop *L,
                        ScalarEvolution &SE, DominatorTree &DT) {
  if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(Reg)) {
    if (AR->getLoop() == L)
      AddRecCost += 1; /// TODO: This should be a function of the stride.

    // If this is an addrec for another loop, don't second-guess its addrec phi
    // nodes. LSR isn't currently smart enough to reason about more than one
    // loop at a time. LSR has already run on inner loops, will not run on
    // other loops, and cannot be expected to change sibling loops. If the
    // AddRec exists, consider its register free and leave it alone. Otherwise,
    // do not consider this formula at all.
    else if (!EnableNested || L->contains(AR->getLoop()) ||
             (!AR->getLoop()->contains(L) &&
              DT.dominates(L->getHeader(), AR->getLoop()->getHeader()))) {
      if (isExistingPhi(AR, SE))
        return;

      // For !EnableNested, never rewrite IVs in other loops.
      if (!EnableNested) {
        Loose();
        return;
      }
      // If this isn't one of the addrecs that the loop already has, it
      // would require a costly new phi and add. TODO: This isn't
      // precisely modeled right now.
      ++NumBaseAdds;
      if (!Regs.count(AR->getStart())) {
        RateRegister(AR->getStart(), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }

    // Add the step value register, if it needs one.
    // TODO: The non-affine case isn't precisely modeled here.
    if (!AR->isAffine() || !isa<SCEVConstant>(AR->getOperand(1))) {
      if (!Regs.count(AR->getOperand(1))) {
        RateRegister(AR->getOperand(1), Regs, L, SE, DT);
        if (isLoser())
          return;
      }
    }
  }
  ++NumRegs;

  // Rough heuristic; favor registers which don't require extra setup
  // instructions in the preheader.
  if (!isa<SCEVUnknown>(Reg) &&
      !isa<SCEVConstant>(Reg) &&
      !(isa<SCEVAddRecExpr>(Reg) &&
        (isa<SCEVUnknown>(cast<SCEVAddRecExpr>(Reg)->getStart()) ||
         isa<SCEVConstant>(cast<SCEVAddRecExpr>(Reg)->getStart()))))
    ++SetupCost;

  NumIVMuls += isa<SCEVMulExpr>(Reg) &&
               SE.hasComputableLoopEvolution(Reg, L);
}
/// RatePrimaryRegister - Record this register in the set. If we haven't seen it
/// before, rate it. Optional LoserRegs provides a way to declare any formula
/// that refers to one of those regs an instant loser.
void Cost::RatePrimaryRegister(const SCEV *Reg,
                               SmallPtrSet<const SCEV *, 16> &Regs,
                               const Loop *L,
                               ScalarEvolution &SE, DominatorTree &DT,
                               SmallPtrSet<const SCEV *, 16> *LoserRegs) {
  if (LoserRegs && LoserRegs->count(Reg)) {
    Loose();
    return;
  }
  if (Regs.insert(Reg)) {
    RateRegister(Reg, Regs, L, SE, DT);
    if (LoserRegs && isLoser())
      LoserRegs->insert(Reg);
  }
}
void Cost::RateFormula(const Formula &F,
                       SmallPtrSet<const SCEV *, 16> &Regs,
                       const DenseSet<const SCEV *> &VisitedRegs,
                       const Loop *L,
                       const SmallVectorImpl<int64_t> &Offsets,
                       ScalarEvolution &SE, DominatorTree &DT,
                       SmallPtrSet<const SCEV *, 16> *LoserRegs) {
  // Tally up the registers.
  if (const SCEV *ScaledReg = F.ScaledReg) {
    if (VisitedRegs.count(ScaledReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(ScaledReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (VisitedRegs.count(BaseReg)) {
      Loose();
      return;
    }
    RatePrimaryRegister(BaseReg, Regs, L, SE, DT, LoserRegs);
    if (isLoser())
      return;
  }

  // Determine how many (unfolded) adds we'll need inside the loop.
  size_t NumBaseParts = F.BaseRegs.size() + (F.UnfoldedOffset != 0);
  if (NumBaseParts > 1)
    NumBaseAdds += NumBaseParts - 1;

  // Tally up the non-zero immediates.
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    int64_t Offset = (uint64_t)*I + F.AM.BaseOffs;
    if (F.AM.BaseGV)
      ImmCost += 64; // Handle symbolic values conservatively.
                     // TODO: This should probably be the pointer size.
    else if (Offset != 0)
      ImmCost += APInt(64, Offset, true).getMinSignedBits();
  }
  assert(isValid() && "invalid cost");
}
/// Loose - Set this cost to a losing value.
void Cost::Loose() {
  NumRegs = ~0u;
  AddRecCost = ~0u;
  NumIVMuls = ~0u;
  NumBaseAdds = ~0u;
  ImmCost = ~0u;
  SetupCost = ~0u;
}
/// operator< - Choose the lower cost.
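///
/// (Illustrative note, not from the original comment: the comparison is
/// lexicographic, so a formula needing fewer registers always wins
/// regardless of how the remaining fields compare.)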
bool Cost::operator<(const Cost &Other) const {
  if (NumRegs != Other.NumRegs)
    return NumRegs < Other.NumRegs;
  if (AddRecCost != Other.AddRecCost)
    return AddRecCost < Other.AddRecCost;
  if (NumIVMuls != Other.NumIVMuls)
    return NumIVMuls < Other.NumIVMuls;
  if (NumBaseAdds != Other.NumBaseAdds)
    return NumBaseAdds < Other.NumBaseAdds;
  if (ImmCost != Other.ImmCost)
    return ImmCost < Other.ImmCost;
  if (SetupCost != Other.SetupCost)
    return SetupCost < Other.SetupCost;
  return false;
}
void Cost::print(raw_ostream &OS) const {
  OS << NumRegs << " reg" << (NumRegs == 1 ? "" : "s");
  if (AddRecCost != 0)
    OS << ", with addrec cost " << AddRecCost;
  if (NumIVMuls != 0)
    OS << ", plus " << NumIVMuls << " IV mul" << (NumIVMuls == 1 ? "" : "s");
  if (NumBaseAdds != 0)
    OS << ", plus " << NumBaseAdds << " base add"
       << (NumBaseAdds == 1 ? "" : "s");
  if (ImmCost != 0)
    OS << ", plus " << ImmCost << " imm cost";
  if (SetupCost != 0)
    OS << ", plus " << SetupCost << " setup cost";
}

void Cost::dump() const {
  print(errs()); errs() << '\n';
}
namespace {

/// LSRFixup - An operand value in an instruction which is to be replaced
/// with some equivalent, possibly strength-reduced, replacement.
struct LSRFixup {
  /// UserInst - The instruction which will be updated.
  Instruction *UserInst;

  /// OperandValToReplace - The operand of the instruction which will
  /// be replaced. The operand may be used more than once; every instance
  /// will be replaced.
  Value *OperandValToReplace;

  /// PostIncLoops - If this user is to use the post-incremented value of an
  /// induction variable, this set is non-empty and holds the loops
  /// associated with the induction variable.
  PostIncLoopSet PostIncLoops;

  /// LUIdx - The index of the LSRUse describing the expression which
  /// this fixup needs, minus an offset (below).
  size_t LUIdx;

  /// Offset - A constant offset to be added to the LSRUse expression.
  /// This allows multiple fixups to share the same LSRUse with different
  /// offsets, for example in an unrolled loop.
  int64_t Offset;

  bool isUseFullyOutsideLoop(const Loop *L) const;

  LSRFixup();

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

LSRFixup::LSRFixup()
  : UserInst(0), OperandValToReplace(0), LUIdx(~size_t(0)), Offset(0) {}
/// isUseFullyOutsideLoop - Test whether this fixup always uses its
/// value outside of the given loop.
bool LSRFixup::isUseFullyOutsideLoop(const Loop *L) const {
  // PHI nodes use their value in their incoming blocks.
  if (const PHINode *PN = dyn_cast<PHINode>(UserInst)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
      if (PN->getIncomingValue(i) == OperandValToReplace &&
          L->contains(PN->getIncomingBlock(i)))
        return false;
    return true;
  }

  return !L->contains(UserInst);
}

void LSRFixup::print(raw_ostream &OS) const {
  OS << "UserInst=";
  // Store is common and interesting enough to be worth special-casing.
  if (StoreInst *Store = dyn_cast<StoreInst>(UserInst)) {
    OS << "store ";
    WriteAsOperand(OS, Store->getOperand(0), /*PrintType=*/false);
  } else if (UserInst->getType()->isVoidTy())
    OS << UserInst->getOpcodeName();
  else
    WriteAsOperand(OS, UserInst, /*PrintType=*/false);

  OS << ", OperandValToReplace=";
  WriteAsOperand(OS, OperandValToReplace, /*PrintType=*/false);

  for (PostIncLoopSet::const_iterator I = PostIncLoops.begin(),
       E = PostIncLoops.end(); I != E; ++I) {
    OS << ", PostIncLoop=";
    WriteAsOperand(OS, (*I)->getHeader(), /*PrintType=*/false);
  }

  if (LUIdx != ~size_t(0))
    OS << ", LUIdx=" << LUIdx;

  if (Offset != 0)
    OS << ", Offset=" << Offset;
}

void LSRFixup::dump() const {
  print(errs()); errs() << '\n';
}

namespace {
/// UniquifierDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of sorted SmallVectors of const SCEV*.
struct UniquifierDenseMapInfo {
  static SmallVector<const SCEV *, 2> getEmptyKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-1));
    return V;
  }

  static SmallVector<const SCEV *, 2> getTombstoneKey() {
    SmallVector<const SCEV *, 2> V;
    V.push_back(reinterpret_cast<const SCEV *>(-2));
    return V;
  }

  static unsigned getHashValue(const SmallVector<const SCEV *, 2> &V) {
    unsigned Result = 0;
    for (SmallVectorImpl<const SCEV *>::const_iterator I = V.begin(),
         E = V.end(); I != E; ++I)
      Result ^= DenseMapInfo<const SCEV *>::getHashValue(*I);
    return Result;
  }

  static bool isEqual(const SmallVector<const SCEV *, 2> &LHS,
                      const SmallVector<const SCEV *, 2> &RHS) {
    return LHS == RHS;
  }
};
/// LSRUse - This class holds the state that LSR keeps for each use in
/// IVUsers, as well as uses invented by LSR itself. It includes information
/// about what kinds of things can be folded into the user, information about
/// the user itself, and information about how the use may be satisfied.
/// TODO: Represent multiple users of the same expression in common?
class LSRUse {
  DenseSet<SmallVector<const SCEV *, 2>, UniquifierDenseMapInfo> Uniquifier;

public:
  /// KindType - An enum for a kind of use, indicating what types of
  /// scaled and immediate operands it might support.
  enum KindType {
    Basic,   ///< A normal use, with no folding.
    Special, ///< A special case of basic, allowing -1 scales.
    Address, ///< An address use; folding according to TargetLowering.
    ICmpZero ///< An equality icmp with both operands folded into one.
    // TODO: Add a generic icmp too?
  };

  KindType Kind;
  Type *AccessTy;

  SmallVector<int64_t, 8> Offsets;
  int64_t MinOffset;
  int64_t MaxOffset;

  /// AllFixupsOutsideLoop - This records whether all of the fixups using this
  /// LSRUse are outside of the loop, in which case some special-case heuristics
  /// may be used.
  bool AllFixupsOutsideLoop;

  /// WidestFixupType - This records the widest use type for any fixup using
  /// this LSRUse. FindUseWithSimilarFormula can't consider uses with different
  /// max fixup widths to be equivalent, because the narrower one may be relying
  /// on the implicit truncation to truncate away bogus bits.
  Type *WidestFixupType;

  /// Formulae - A list of ways to build a value that can satisfy this user.
  /// After the list is populated, one of these is selected heuristically and
  /// used to formulate a replacement for OperandValToReplace in UserInst.
  SmallVector<Formula, 12> Formulae;

  /// Regs - The set of register candidates used by all formulae in this LSRUse.
  SmallPtrSet<const SCEV *, 4> Regs;

  LSRUse(KindType K, Type *T) : Kind(K), AccessTy(T),
                                MinOffset(INT64_MAX),
                                MaxOffset(INT64_MIN),
                                AllFixupsOutsideLoop(true),
                                WidestFixupType(0) {}

  bool HasFormulaWithSameRegs(const Formula &F) const;
  bool InsertFormula(const Formula &F);
  void DeleteFormula(Formula &F);
  void RecomputeRegs(size_t LUIdx, RegUseTracker &Reguses);

  void print(raw_ostream &OS) const;
  void dump() const;
};

}
/// HasFormulaWithSameRegs - Test whether this use has a formula which has the
/// same registers as the given formula.
bool LSRUse::HasFormulaWithSameRegs(const Formula &F) const {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());
  return Uniquifier.count(Key);
}

/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRUse::InsertFormula(const Formula &F) {
  SmallVector<const SCEV *, 2> Key = F.BaseRegs;
  if (F.ScaledReg) Key.push_back(F.ScaledReg);
  // Unstable sort by host order ok, because this is only used for uniquifying.
  std::sort(Key.begin(), Key.end());

  if (!Uniquifier.insert(Key).second)
    return false;

  // Using a register to hold the value of 0 is not profitable.
  assert((!F.ScaledReg || !F.ScaledReg->isZero()) &&
         "Zero allocated in a scaled register!");
#ifndef NDEBUG
  for (SmallVectorImpl<const SCEV *>::const_iterator I =
       F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I)
    assert(!(*I)->isZero() && "Zero allocated in a base register!");
#endif

  // Add the formula to the list.
  Formulae.push_back(F);

  // Record registers now being used by this use.
  Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());

  return true;
}

/// DeleteFormula - Remove the given formula from this use's list.
void LSRUse::DeleteFormula(Formula &F) {
  if (&F != &Formulae.back())
    std::swap(F, Formulae.back());
  Formulae.pop_back();
}
/// RecomputeRegs - Recompute the Regs field, and update RegUses.
void LSRUse::RecomputeRegs(size_t LUIdx, RegUseTracker &RegUses) {
  // Now that we've filtered out some formulae, recompute the Regs set.
  SmallPtrSet<const SCEV *, 4> OldRegs = Regs;
  Regs.clear();
  for (SmallVectorImpl<Formula>::const_iterator I = Formulae.begin(),
       E = Formulae.end(); I != E; ++I) {
    const Formula &F = *I;
    if (F.ScaledReg) Regs.insert(F.ScaledReg);
    Regs.insert(F.BaseRegs.begin(), F.BaseRegs.end());
  }

  // Update the RegTracker.
  for (SmallPtrSet<const SCEV *, 4>::iterator I = OldRegs.begin(),
       E = OldRegs.end(); I != E; ++I)
    if (!Regs.count(*I))
      RegUses.DropRegister(*I, LUIdx);
}
void LSRUse::print(raw_ostream &OS) const {
  OS << "LSR Use: Kind=";
  switch (Kind) {
  case Basic:    OS << "Basic"; break;
  case Special:  OS << "Special"; break;
  case ICmpZero: OS << "ICmpZero"; break;
  case Address:
    OS << "Address of ";
    if (AccessTy->isPointerTy())
      OS << "pointer"; // the full pointer type could be really verbose
    else
      OS << *AccessTy;
  }

  OS << ", Offsets={";
  for (SmallVectorImpl<int64_t>::const_iterator I = Offsets.begin(),
       E = Offsets.end(); I != E; ++I) {
    OS << *I;
    if (llvm::next(I) != E)
      OS << ',';
  }
  OS << '}';

  if (AllFixupsOutsideLoop)
    OS << ", all-fixups-outside-loop";

  if (WidestFixupType)
    OS << ", widest fixup type: " << *WidestFixupType;
}

void LSRUse::dump() const {
  print(errs()); errs() << '\n';
}
/// isLegalUse - Test whether the use described by AM is "legal", meaning it can
/// be completely folded into the user instruction at isel time. This includes
/// address-mode folding and special icmp tricks.
static bool isLegalUse(const TargetLowering::AddrMode &AM,
                       LSRUse::KindType Kind, Type *AccessTy,
                       const TargetLowering *TLI) {
  switch (Kind) {
  case LSRUse::Address:
    // If we have low-level target information, ask the target if it can
    // completely fold this address.
    if (TLI) return TLI->isLegalAddressingMode(AM, AccessTy);

    // Otherwise, just guess that reg+reg addressing is legal.
    return !AM.BaseGV && AM.BaseOffs == 0 && AM.Scale <= 1;

  case LSRUse::ICmpZero:
    // There's not even a target hook for querying whether it would be legal to
    // fold a GV into an ICmp.
    if (AM.BaseGV)
      return false;

    // ICmp only has two operands; don't allow more than two non-trivial parts.
    if (AM.Scale != 0 && AM.HasBaseReg && AM.BaseOffs != 0)
      return false;

    // ICmp only supports no scale or a -1 scale, as we can "fold" a -1 scale by
    // putting the scaled register in the other operand of the icmp.
    if (AM.Scale != 0 && AM.Scale != -1)
      return false;
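    // For example (illustrative, not from the original comment):
    // "icmp eq (%a + -1*%b), 0" can simply be emitted as "icmp eq %a, %b",
    // so a -1 scale costs nothing here.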
    // If we have low-level target information, ask the target if it can fold an
    // integer immediate on an icmp.
    if (AM.BaseOffs != 0) {
      if (TLI) return TLI->isLegalICmpImmediate(-(uint64_t)AM.BaseOffs);
      return false;
    }

    return true;

  case LSRUse::Basic:
    // Only handle single-register values.
    return !AM.BaseGV && AM.Scale == 0 && AM.BaseOffs == 0;

  case LSRUse::Special:
    // Only handle -1 scales, or no scale.
    return AM.Scale == 0 || AM.Scale == -1;
  }

  return false;
}
static bool isLegalUse(TargetLowering::AddrMode AM,
                       int64_t MinOffset, int64_t MaxOffset,
                       LSRUse::KindType Kind, Type *AccessTy,
                       const TargetLowering *TLI) {
  // Check for overflow.
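  // (Illustrative example, not from the original comment: if AM.BaseOffs is
  // INT64_MAX and MinOffset is 1, the sum wraps negative, so the
  // "> AM.BaseOffs" test disagrees with "MinOffset > 0" and we reject the
  // combination.)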
  if (((int64_t)((uint64_t)AM.BaseOffs + MinOffset) > AM.BaseOffs) !=
      (MinOffset > 0))
    return false;
  AM.BaseOffs = (uint64_t)AM.BaseOffs + MinOffset;
  if (isLegalUse(AM, Kind, AccessTy, TLI)) {
    AM.BaseOffs = (uint64_t)AM.BaseOffs - MinOffset;
    // Check for overflow.
    if (((int64_t)((uint64_t)AM.BaseOffs + MaxOffset) > AM.BaseOffs) !=
        (MaxOffset > 0))
      return false;
    AM.BaseOffs = (uint64_t)AM.BaseOffs + MaxOffset;
    return isLegalUse(AM, Kind, AccessTy, TLI);
  }
  return false;
}
static bool isAlwaysFoldable(int64_t BaseOffs,
                             GlobalValue *BaseGV,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, Type *AccessTy,
                             const TargetLowering *TLI) {
  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  // Canonicalize a scale of 1 to a base register if the formula doesn't
  // already have a base register.
  if (!AM.HasBaseReg && AM.Scale == 1) {
    AM.Scale = 0;
    AM.HasBaseReg = true;
  }

  return isLegalUse(AM, Kind, AccessTy, TLI);
}
static bool isAlwaysFoldable(const SCEV *S,
                             int64_t MinOffset, int64_t MaxOffset,
                             bool HasBaseReg,
                             LSRUse::KindType Kind, Type *AccessTy,
                             const TargetLowering *TLI,
                             ScalarEvolution &SE) {
  // Fast-path: zero is always foldable.
  if (S->isZero()) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  int64_t BaseOffs = ExtractImmediate(S, SE);
  GlobalValue *BaseGV = ExtractSymbol(S, SE);

  // If there's anything else involved, it's not foldable.
  if (!S->isZero()) return false;

  // Fast-path: zero is always foldable.
  if (BaseOffs == 0 && !BaseGV) return true;

  // Conservatively, create an address with an immediate and a
  // base and a scale.
  TargetLowering::AddrMode AM;
  AM.BaseOffs = BaseOffs;
  AM.BaseGV = BaseGV;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Kind == LSRUse::ICmpZero ? -1 : 1;

  return isLegalUse(AM, MinOffset, MaxOffset, Kind, AccessTy, TLI);
}

namespace {
/// UseMapDenseMapInfo - A DenseMapInfo implementation for holding
/// DenseMaps and DenseSets of pairs of const SCEV* and LSRUse::Kind.
struct UseMapDenseMapInfo {
  static std::pair<const SCEV *, LSRUse::KindType> getEmptyKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-1), LSRUse::Basic);
  }

  static std::pair<const SCEV *, LSRUse::KindType> getTombstoneKey() {
    return std::make_pair(reinterpret_cast<const SCEV *>(-2), LSRUse::Basic);
  }

  static unsigned
  getHashValue(const std::pair<const SCEV *, LSRUse::KindType> &V) {
    unsigned Result = DenseMapInfo<const SCEV *>::getHashValue(V.first);
    Result ^= DenseMapInfo<unsigned>::getHashValue(unsigned(V.second));
    return Result;
  }

  static bool isEqual(const std::pair<const SCEV *, LSRUse::KindType> &LHS,
                      const std::pair<const SCEV *, LSRUse::KindType> &RHS) {
    return LHS == RHS;
  }
};
/// LSRInstance - This class holds state for the main loop strength reduction
/// logic.
class LSRInstance {
  IVUsers &IU;
  ScalarEvolution &SE;
  DominatorTree &DT;
  LoopInfo &LI;
  const TargetLowering *const TLI;
  Loop *const L;
  bool Changed;

  /// IVIncInsertPos - This is the position at which the current loop's
  /// induction variable increment should be placed. In simple loops, this is
  /// the latch block's terminator. But in more complicated cases, this is a
  /// position which will dominate all the in-loop post-increment users.
  Instruction *IVIncInsertPos;

  /// Factors - Interesting factors between use strides.
  SmallSetVector<int64_t, 8> Factors;

  /// Types - Interesting use types, to facilitate truncation reuse.
  SmallSetVector<Type *, 4> Types;

  /// Fixups - The list of operands which are to be replaced.
  SmallVector<LSRFixup, 16> Fixups;

  /// Uses - The list of interesting uses.
  SmallVector<LSRUse, 16> Uses;

  /// RegUses - Track which uses use which register candidates.
  RegUseTracker RegUses;

  void OptimizeShadowIV();
  bool FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse);
  ICmpInst *OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse);
  void OptimizeLoopTermCond();

  void CollectInterestingTypesAndFactors();
  void CollectFixupsAndInitialFormulae();

  LSRFixup &getNewFixup() {
    Fixups.push_back(LSRFixup());
    return Fixups.back();
  }

  // Support for sharing of LSRUses between LSRFixups.
  typedef DenseMap<std::pair<const SCEV *, LSRUse::KindType>,
                   size_t,
                   UseMapDenseMapInfo> UseMapTy;
  UseMapTy UseMap;

  bool reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                          LSRUse::KindType Kind, Type *AccessTy);

  std::pair<size_t, int64_t> getUse(const SCEV *&Expr,
                                    LSRUse::KindType Kind,
                                    Type *AccessTy);

  void DeleteUse(LSRUse &LU, size_t LUIdx);

  LSRUse *FindUseWithSimilarFormula(const Formula &F, const LSRUse &OrigLU);

public:
  void InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void InsertSupplementalFormula(const SCEV *S, LSRUse &LU, size_t LUIdx);
  void CountRegisters(const Formula &F, size_t LUIdx);
  bool InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F);

  void CollectLoopInvariantFixupsAndFormulae();

  void GenerateReassociations(LSRUse &LU, unsigned LUIdx, Formula Base,
                              unsigned Depth = 0);
  void GenerateCombinations(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base);
  void GenerateCrossUseConstantOffsets();
  void GenerateAllReuseFormulae();

  void FilterOutUndesirableDedicatedRegisters();

  size_t EstimateSearchSpaceComplexity() const;
  void NarrowSearchSpaceByDetectingSupersets();
  void NarrowSearchSpaceByCollapsingUnrolledCode();
  void NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  void NarrowSearchSpaceByPickingWinnerRegs();
  void NarrowSearchSpaceUsingHeuristics();

  void SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                    Cost &SolutionCost,
                    SmallVectorImpl<const Formula *> &Workspace,
                    const Cost &CurCost,
                    const SmallPtrSet<const SCEV *, 16> &CurRegs,
                    DenseSet<const SCEV *> &VisitedRegs) const;
  void Solve(SmallVectorImpl<const Formula *> &Solution) const;

  BasicBlock::iterator
    HoistInsertPosition(BasicBlock::iterator IP,
                        const SmallVectorImpl<Instruction *> &Inputs) const;
  BasicBlock::iterator AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                                     const LSRFixup &LF,
                                                     const LSRUse &LU) const;

  Value *Expand(const LSRFixup &LF,
                const Formula &F,
                BasicBlock::iterator IP,
                SCEVExpander &Rewriter,
                SmallVectorImpl<WeakVH> &DeadInsts) const;
  void RewriteForPHI(PHINode *PN, const LSRFixup &LF,
                     const Formula &F,
                     SCEVExpander &Rewriter,
                     SmallVectorImpl<WeakVH> &DeadInsts,
                     Pass *P) const;
  void Rewrite(const LSRFixup &LF,
               const Formula &F,
               SCEVExpander &Rewriter,
               SmallVectorImpl<WeakVH> &DeadInsts,
               Pass *P) const;
  void ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                         Pass *P);

  LSRInstance(const TargetLowering *tli, Loop *l, Pass *P);

  bool getChanged() const { return Changed; }

  void print_factors_and_types(raw_ostream &OS) const;
  void print_fixups(raw_ostream &OS) const;
  void print_uses(raw_ostream &OS) const;
  void print(raw_ostream &OS) const;
  void dump() const;
};

}
/// OptimizeShadowIV - If IV is used in an int-to-float cast
/// inside the loop then try to eliminate the cast operation.
void LSRInstance::OptimizeShadowIV() {
  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return;

  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end();
       UI != E; /* empty */) {
    IVUsers::const_iterator CandidateUI = UI;
    ++UI;
    Instruction *ShadowUse = CandidateUI->getUser();
    Type *DestTy = NULL;
    bool IsSigned = false;

    /* If shadow use is an int->float cast then insert a second IV
       to eliminate this cast.

         for (unsigned i = 0; i < n; ++i)
           foo((double)i);

       is transformed into

         double d = 0.0;
         for (unsigned i = 0; i < n; ++i, ++d)
           foo(d);
    */
    if (UIToFPInst *UCast = dyn_cast<UIToFPInst>(CandidateUI->getUser())) {
      IsSigned = false;
      DestTy = UCast->getDestTy();
    }
    else if (SIToFPInst *SCast = dyn_cast<SIToFPInst>(CandidateUI->getUser())) {
      IsSigned = true;
      DestTy = SCast->getDestTy();
    }
    if (!DestTy) continue;

    if (TLI) {
      // If target does not support DestTy natively then do not apply
      // this transformation.
      EVT DVT = TLI->getValueType(DestTy);
      if (!TLI->isTypeLegal(DVT)) continue;
    }

    PHINode *PH = dyn_cast<PHINode>(ShadowUse->getOperand(0));
    if (!PH) continue;
    if (PH->getNumIncomingValues() != 2) continue;

    Type *SrcTy = PH->getType();
    int Mantissa = DestTy->getFPMantissaWidth();
    if (Mantissa == -1) continue;
    if ((int)SE.getTypeSizeInBits(SrcTy) > Mantissa)
      continue;

    unsigned Entry, Latch;
    if (PH->getIncomingBlock(0) == L->getLoopPreheader()) {
      Entry = 0;
      Latch = 1;
    } else {
      Entry = 1;
      Latch = 0;
    }

    ConstantInt *Init = dyn_cast<ConstantInt>(PH->getIncomingValue(Entry));
    if (!Init) continue;
    Constant *NewInit = ConstantFP::get(DestTy, IsSigned ?
                                        (double)Init->getSExtValue() :
                                        (double)Init->getZExtValue());

    BinaryOperator *Incr =
      dyn_cast<BinaryOperator>(PH->getIncomingValue(Latch));
    if (!Incr) continue;
    if (Incr->getOpcode() != Instruction::Add
        && Incr->getOpcode() != Instruction::Sub)
      continue;

    /* Initialize new IV, double d = 0.0 in above example. */
    ConstantInt *C = NULL;
    if (Incr->getOperand(0) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(1));
    else if (Incr->getOperand(1) == PH)
      C = dyn_cast<ConstantInt>(Incr->getOperand(0));
    else
      continue;

    if (!C) continue;

    // Ignore negative constants, as the code below doesn't handle them
    // correctly. TODO: Remove this restriction.
    if (!C->getValue().isStrictlyPositive()) continue;

    /* Add new PHINode. */
    PHINode *NewPH = PHINode::Create(DestTy, 2, "IV.S.", PH);

    /* create new increment. '++d' in above example. */
    Constant *CFP = ConstantFP::get(DestTy, C->getZExtValue());
    BinaryOperator *NewIncr =
      BinaryOperator::Create(Incr->getOpcode() == Instruction::Add ?
                               Instruction::FAdd : Instruction::FSub,
                             NewPH, CFP, "IV.S.next.", Incr);

    NewPH->addIncoming(NewInit, PH->getIncomingBlock(Entry));
    NewPH->addIncoming(NewIncr, PH->getIncomingBlock(Latch));

    /* Remove cast operation */
    ShadowUse->replaceAllUsesWith(NewPH);
    ShadowUse->eraseFromParent();
    Changed = true;
    break;
  }
}
/// FindIVUserForCond - If Cond has an operand that is an expression of an IV,
/// set the IV user and stride information and return true, otherwise return
/// false.
bool LSRInstance::FindIVUserForCond(ICmpInst *Cond, IVStrideUse *&CondUse) {
  for (IVUsers::iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
    if (UI->getUser() == Cond) {
      // NOTE: we could handle setcc instructions with multiple uses here, but
      // InstCombine does it as well for simple uses, it's not clear that it
      // occurs enough in real life to handle.
      CondUse = UI;
      return true;
    }
  return false;
}
/// OptimizeMax - Rewrite the loop's terminating condition if it uses
/// a max computation.
///
/// This is a narrow solution to a specific, but acute, problem. For loops
/// like this:
///
///   i = 0;
///   do {
///     p[i] = 0.0;
///   } while (++i < n);
///
/// the trip count isn't just 'n', because 'n' might not be positive. And
/// unfortunately this can come up even for loops where the user didn't use
/// a C do-while loop. For example, seemingly well-behaved top-test loops
/// will commonly be lowered like this:
///
///   if (n > 0) {
///     i = 0;
///     do {
///       p[i] = 0.0;
///     } while (++i < n);
///   }
///
/// and then it's possible for subsequent optimization to obscure the if
/// test in such a way that indvars can't find it.
///
/// When indvars can't find the if test in loops like this, it creates a
/// max expression, which allows it to give the loop a canonical
/// induction variable:
///
///   i = 0;
///   max = n < 1 ? 1 : n;
///   do {
///     p[i] = 0.0;
///   } while (++i != max);
///
/// Canonical induction variables are necessary because the loop passes
/// are designed around them. The most obvious example of this is the
/// LoopInfo analysis, which doesn't remember trip count values. It
/// expects to be able to rediscover the trip count each time it is
/// needed, and it does this using a simple analysis that only succeeds if
/// the loop has a canonical induction variable.
///
/// However, when it comes time to generate code, the maximum operation
/// can be quite costly, especially if it's inside of an outer loop.
///
/// This function solves this problem by detecting loops of this type and
/// rewriting their conditions from ICMP_NE back to ICMP_SLT, and deleting
/// the instructions for the maximum computation.
///
ICmpInst *LSRInstance::OptimizeMax(ICmpInst *Cond, IVStrideUse* &CondUse) {
  // Check that the loop matches the pattern we're looking for.
  if (Cond->getPredicate() != CmpInst::ICMP_EQ &&
      Cond->getPredicate() != CmpInst::ICMP_NE)
    return Cond;

  SelectInst *Sel = dyn_cast<SelectInst>(Cond->getOperand(1));
  if (!Sel || !Sel->hasOneUse()) return Cond;

  const SCEV *BackedgeTakenCount = SE.getBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(BackedgeTakenCount))
    return Cond;
  const SCEV *One = SE.getConstant(BackedgeTakenCount->getType(), 1);

  // Add one to the backedge-taken count to get the trip count.
  const SCEV *IterationCount = SE.getAddExpr(One, BackedgeTakenCount);
  if (IterationCount != SE.getSCEV(Sel)) return Cond;

  // Check for a max calculation that matches the pattern. There's no check
  // for ICMP_ULE here because the comparison would be with zero, which
  // isn't interesting.
  CmpInst::Predicate Pred = ICmpInst::BAD_ICMP_PREDICATE;
  const SCEVNAryExpr *Max = 0;
  if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(BackedgeTakenCount)) {
    Pred = ICmpInst::ICMP_SLE;
    Max = S;
  } else if (const SCEVSMaxExpr *S = dyn_cast<SCEVSMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_SLT;
    Max = S;
  } else if (const SCEVUMaxExpr *U = dyn_cast<SCEVUMaxExpr>(IterationCount)) {
    Pred = ICmpInst::ICMP_ULT;
    Max = U;
  } else {
    // No match; bail.
    return Cond;
  }

  // To handle a max with more than two operands, this optimization would
  // require additional checking and setup.
  if (Max->getNumOperands() != 2)
    return Cond;

  const SCEV *MaxLHS = Max->getOperand(0);
  const SCEV *MaxRHS = Max->getOperand(1);

  // ScalarEvolution canonicalizes constants to the left. For < and >, look
  // for a comparison with 1. For <= and >=, a comparison with zero.
  if (!MaxLHS ||
      (ICmpInst::isTrueWhenEqual(Pred) ? !MaxLHS->isZero() : (MaxLHS != One)))
    return Cond;

  // Check the relevant induction variable for conformance to
  // the pattern.
  const SCEV *IV = SE.getSCEV(Cond->getOperand(0));
  const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(IV);
  if (!AR || !AR->isAffine() ||
      AR->getStart() != One ||
      AR->getStepRecurrence(SE) != One)
    return Cond;

  assert(AR->getLoop() == L &&
         "Loop condition operand is an addrec in a different loop!");

  // Check the right operand of the select, and remember it, as it will
  // be used in the new comparison instruction.
  Value *NewRHS = 0;
  if (ICmpInst::isTrueWhenEqual(Pred)) {
    // Look for n+1, and grab n.
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(1)))
      if (isa<ConstantInt>(BO->getOperand(1)) &&
          cast<ConstantInt>(BO->getOperand(1))->isOne() &&
          SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
    if (AddOperator *BO = dyn_cast<AddOperator>(Sel->getOperand(2)))
      if (isa<ConstantInt>(BO->getOperand(1)) &&
          cast<ConstantInt>(BO->getOperand(1))->isOne() &&
          SE.getSCEV(BO->getOperand(0)) == MaxRHS)
        NewRHS = BO->getOperand(0);
    if (!NewRHS)
      return Cond;
  } else if (SE.getSCEV(Sel->getOperand(1)) == MaxRHS)
    NewRHS = Sel->getOperand(1);
  else if (SE.getSCEV(Sel->getOperand(2)) == MaxRHS)
    NewRHS = Sel->getOperand(2);
  else if (const SCEVUnknown *SU = dyn_cast<SCEVUnknown>(MaxRHS))
    NewRHS = SU->getValue();
  else
    // Max doesn't match expected pattern.
    return Cond;

  // Determine the new comparison opcode. It may be signed or unsigned,
  // and the original comparison may be either equality or inequality.
  if (Cond->getPredicate() == CmpInst::ICMP_EQ)
    Pred = CmpInst::getInversePredicate(Pred);

  // Ok, everything looks ok to change the condition into an SLT or SGE and
  // delete the max calculation.
  ICmpInst *NewCond =
    new ICmpInst(Cond, Pred, Cond->getOperand(0), NewRHS, "scmp");

  // Delete the max calculation instructions.
  Cond->replaceAllUsesWith(NewCond);
  CondUse->setUser(NewCond);
  Instruction *Cmp = cast<Instruction>(Sel->getOperand(0));
  Cond->eraseFromParent();
  Sel->eraseFromParent();
  if (Cmp->use_empty())
    Cmp->eraseFromParent();
  return NewCond;
}
1770 /// OptimizeLoopTermCond - Change loop terminating condition to use the
1771 /// postinc iv when possible.
void
LSRInstance::OptimizeLoopTermCond() {
  SmallPtrSet<Instruction *, 4> PostIncs;

  BasicBlock *LatchBlock = L->getLoopLatch();
  SmallVector<BasicBlock*, 8> ExitingBlocks;
  L->getExitingBlocks(ExitingBlocks);

  for (unsigned i = 0, e = ExitingBlocks.size(); i != e; ++i) {
    BasicBlock *ExitingBlock = ExitingBlocks[i];

    // Get the terminating condition for the loop if possible. If we
    // can, we want to change it to use a post-incremented version of its
    // induction variable, to allow coalescing the live ranges for the IV into
    // one register value.
    BranchInst *TermBr = dyn_cast<BranchInst>(ExitingBlock->getTerminator());
    if (!TermBr)
      continue;
    // FIXME: Overly conservative, termination condition could be an 'or' etc..
    if (TermBr->isUnconditional() || !isa<ICmpInst>(TermBr->getCondition()))
      continue;

    // Search IVUsesByStride to find Cond's IVUse if there is one.
    IVStrideUse *CondUse = 0;
    ICmpInst *Cond = cast<ICmpInst>(TermBr->getCondition());
    if (!FindIVUserForCond(Cond, CondUse))
      continue;

    // If the trip count is computed in terms of a max (due to ScalarEvolution
    // being unable to find a sufficient guard, for example), change the loop
    // comparison to use SLT or ULT instead of NE.
    // One consequence of doing this now is that it disrupts the count-down
    // optimization. That's not always a bad thing though, because in such
    // cases it may still be worthwhile to avoid a max.
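    //
    // For example (an illustrative sketch): if the computed trip count is
    // smax(%n, 1), an exit test
    //   %c = icmp ne i32 %i.next, %smax
    // can be rewritten along the lines of
    //   %c = icmp slt i32 %i.next, %n
    // which makes the smax computation itself dead.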
    Cond = OptimizeMax(Cond, CondUse);

    // If this exiting block dominates the latch block, it may also use
    // the post-inc value if it won't be shared with other uses.
    // Check for dominance.
    if (!DT.dominates(ExitingBlock, LatchBlock))
      continue;

    // Conservatively avoid trying to use the post-inc value in non-latch
    // exits if there may be pre-inc users in intervening blocks.
    if (LatchBlock != ExitingBlock)
      for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI)
        // Test if the use is reachable from the exiting block. This dominator
        // query is a conservative approximation of reachability.
        if (&*UI != CondUse &&
            !DT.properlyDominates(UI->getUser()->getParent(), ExitingBlock)) {
          // Conservatively assume there may be reuse if the quotient of their
          // strides could be a legal scale.
          const SCEV *A = IU.getStride(*CondUse, L);
          const SCEV *B = IU.getStride(*UI, L);
          if (!A || !B) continue;
          if (SE.getTypeSizeInBits(A->getType()) !=
              SE.getTypeSizeInBits(B->getType())) {
            if (SE.getTypeSizeInBits(A->getType()) >
                SE.getTypeSizeInBits(B->getType()))
              B = SE.getSignExtendExpr(B, A->getType());
            else
              A = SE.getSignExtendExpr(A, B->getType());
          }
          if (const SCEVConstant *D =
                dyn_cast_or_null<SCEVConstant>(getExactSDiv(B, A, SE))) {
            const ConstantInt *C = D->getValue();
            // Stride of one or negative one can have reuse with non-addresses.
            if (C->isOne() || C->isAllOnesValue())
              goto decline_post_inc;
            // Avoid weird situations.
            if (C->getValue().getMinSignedBits() >= 64 ||
                C->getValue().isMinSignedValue())
              goto decline_post_inc;
            // Without TLI, assume that any stride might be valid, and so any
            // use might be shared.
            if (!TLI)
              goto decline_post_inc;
            // Check for possible scaled-address reuse.
            Type *AccessTy = getAccessType(UI->getUser());
            TargetLowering::AddrMode AM;
            AM.Scale = C->getSExtValue();
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
            AM.Scale = -AM.Scale;
            if (TLI->isLegalAddressingMode(AM, AccessTy))
              goto decline_post_inc;
          }
        }

    DEBUG(dbgs() << "  Change loop exiting icmp to use postinc iv: "
                 << *Cond << '\n');

    // It's possible for the setcc instruction to be anywhere in the loop, and
    // possible for it to have multiple users. If it is not immediately before
    // the exiting block branch, move it.
    if (&*++BasicBlock::iterator(Cond) != TermBr) {
      if (Cond->hasOneUse()) {
        Cond->moveBefore(TermBr);
      } else {
        // Clone the terminating condition and insert into the loopend.
        ICmpInst *OldCond = Cond;
        Cond = cast<ICmpInst>(Cond->clone());
        Cond->setName(L->getHeader()->getName() + ".termcond");
        ExitingBlock->getInstList().insert(TermBr, Cond);

        // Clone the IVUse, as the old use still exists!
        CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
        TermBr->replaceUsesOfWith(OldCond, Cond);
      }
    }

    // If we get to here, we know that we can transform the setcc instruction to
    // use the post-incremented version of the IV, allowing us to coalesce the
    // live ranges for the IV correctly.
    CondUse->transformToPostInc(L);
    Changed = true;

    PostIncs.insert(Cond);
  decline_post_inc:;
  }

  // Determine an insertion point for the loop induction variable increment. It
  // must dominate all the post-inc comparisons we just set up, and it must
  // dominate the loop latch edge.
  IVIncInsertPos = L->getLoopLatch()->getTerminator();
  for (SmallPtrSet<Instruction *, 4>::const_iterator I = PostIncs.begin(),
       E = PostIncs.end(); I != E; ++I) {
    BasicBlock *BB =
      DT.findNearestCommonDominator(IVIncInsertPos->getParent(),
                                    (*I)->getParent());
    if (BB == (*I)->getParent())
      IVIncInsertPos = *I;
    else if (BB != IVIncInsertPos->getParent())
      IVIncInsertPos = BB->getTerminator();
  }
}
/// reconcileNewOffset - Determine if the given use can accommodate a fixup
/// at the given offset and other details. If so, update the use and
/// return true.
bool
LSRInstance::reconcileNewOffset(LSRUse &LU, int64_t NewOffset, bool HasBaseReg,
                                LSRUse::KindType Kind, Type *AccessTy) {
  int64_t NewMinOffset = LU.MinOffset;
  int64_t NewMaxOffset = LU.MaxOffset;
  Type *NewAccessTy = AccessTy;

  // Check for a mismatched kind. It's tempting to collapse mismatched kinds to
  // something conservative; however, this can pessimize in the case that one of
  // the uses will have all its uses outside the loop, for example.
  if (LU.Kind != Kind)
    return false;
  // Conservatively assume HasBaseReg is true for now.
  if (NewOffset < LU.MinOffset) {
    if (!isAlwaysFoldable(LU.MaxOffset - NewOffset, 0, HasBaseReg,
                          Kind, AccessTy, TLI))
      return false;
    NewMinOffset = NewOffset;
  } else if (NewOffset > LU.MaxOffset) {
    if (!isAlwaysFoldable(NewOffset - LU.MinOffset, 0, HasBaseReg,
                          Kind, AccessTy, TLI))
      return false;
    NewMaxOffset = NewOffset;
  }
  // Check for a mismatched access type, and fall back conservatively as needed.
  // TODO: Be less conservative when the type is similar and can use the same
  // addressing modes.
  if (Kind == LSRUse::Address && AccessTy != LU.AccessTy)
    NewAccessTy = Type::getVoidTy(AccessTy->getContext());

  // Update the use.
  LU.MinOffset = NewMinOffset;
  LU.MaxOffset = NewMaxOffset;
  LU.AccessTy = NewAccessTy;
  if (NewOffset != LU.Offsets.back())
    LU.Offsets.push_back(NewOffset);
  return true;
}
/// getUse - Return an LSRUse index and an offset value for a fixup which
/// needs the given expression, with the given kind and optional access type.
/// Either reuse an existing use or create a new one, as needed.
std::pair<size_t, int64_t>
LSRInstance::getUse(const SCEV *&Expr,
                    LSRUse::KindType Kind, Type *AccessTy) {
  const SCEV *Copy = Expr;
  int64_t Offset = ExtractImmediate(Expr, SE);

  // Basic uses can't accept any offset, for example.
  if (!isAlwaysFoldable(Offset, 0, /*HasBaseReg=*/true, Kind, AccessTy, TLI)) {
    Expr = Copy;
    Offset = 0;
  }

  std::pair<UseMapTy::iterator, bool> P =
    UseMap.insert(std::make_pair(std::make_pair(Expr, Kind), 0));
  if (!P.second) {
    // A use already existed with this base.
    size_t LUIdx = P.first->second;
    LSRUse &LU = Uses[LUIdx];
    if (reconcileNewOffset(LU, Offset, /*HasBaseReg=*/true, Kind, AccessTy))
      // Reuse this use.
      return std::make_pair(LUIdx, Offset);
  }

  // Create a new use.
  size_t LUIdx = Uses.size();
  P.first->second = LUIdx;
  Uses.push_back(LSRUse(Kind, AccessTy));
  LSRUse &LU = Uses[LUIdx];

  // We don't need to track redundant offsets, but we don't need to go out
  // of our way here to avoid them.
  if (LU.Offsets.empty() || Offset != LU.Offsets.back())
    LU.Offsets.push_back(Offset);

  LU.MinOffset = Offset;
  LU.MaxOffset = Offset;
  return std::make_pair(LUIdx, Offset);
}
/// DeleteUse - Delete the given use from the Uses list.
void LSRInstance::DeleteUse(LSRUse &LU, size_t LUIdx) {
  if (&LU != &Uses.back())
    std::swap(LU, Uses.back());
  Uses.pop_back();

  // Update RegUses.
  RegUses.SwapAndDropUse(LUIdx, Uses.size());
}
/// FindUseWithSimilarFormula - Look for a use distinct from OrigLU which has
/// a formula that has the same registers as the given formula.
LSRUse *
LSRInstance::FindUseWithSimilarFormula(const Formula &OrigF,
                                       const LSRUse &OrigLU) {
  // Search all uses for the formula. This could be more clever.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    // Check whether this use is close enough to OrigLU, to see whether it's
    // worthwhile looking through its formulae.
    // Ignore ICmpZero uses because they may contain formulae generated by
    // GenerateICmpZeroScales, in which case adding fixup offsets may
    // be invalid.
    if (&LU != &OrigLU &&
        LU.Kind != LSRUse::ICmpZero &&
        LU.Kind == OrigLU.Kind && OrigLU.AccessTy == LU.AccessTy &&
        LU.WidestFixupType == OrigLU.WidestFixupType &&
        LU.HasFormulaWithSameRegs(OrigF)) {
      // Scan through this use's formulae.
      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
           E = LU.Formulae.end(); I != E; ++I) {
        const Formula &F = *I;
        // Check to see if this formula has the same registers and symbols
        // as OrigF.
        if (F.BaseRegs == OrigF.BaseRegs &&
            F.ScaledReg == OrigF.ScaledReg &&
            F.AM.BaseGV == OrigF.AM.BaseGV &&
            F.AM.Scale == OrigF.AM.Scale &&
            F.UnfoldedOffset == OrigF.UnfoldedOffset) {
          if (F.AM.BaseOffs == 0)
            return &LU;
          // This is the formula where all the registers and symbols matched;
          // there aren't going to be any others. Since we declined it, we
          // can skip the rest of the formulae and proceed to the next LSRUse.
          break;
        }
      }
    }
  }

  // Nothing looked good.
  return 0;
}
void LSRInstance::CollectInterestingTypesAndFactors() {
  SmallSetVector<const SCEV *, 4> Strides;

  // Collect interesting types and strides.
  SmallVector<const SCEV *, 4> Worklist;
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    const SCEV *Expr = IU.getExpr(*UI);

    // Collect interesting types.
    Types.insert(SE.getEffectiveSCEVType(Expr->getType()));

    // Add strides for mentioned loops.
    Worklist.push_back(Expr);
    do {
      const SCEV *S = Worklist.pop_back_val();
      if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
        if (EnableNested || AR->getLoop() == L)
          Strides.insert(AR->getStepRecurrence(SE));
        Worklist.push_back(AR->getStart());
      } else if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
        Worklist.append(Add->op_begin(), Add->op_end());
      }
    } while (!Worklist.empty());
  }

  // Compute interesting factors from the set of interesting strides.
  for (SmallSetVector<const SCEV *, 4>::const_iterator
       I = Strides.begin(), E = Strides.end(); I != E; ++I)
    for (SmallSetVector<const SCEV *, 4>::const_iterator NewStrideIter =
         llvm::next(I); NewStrideIter != E; ++NewStrideIter) {
      const SCEV *OldStride = *I;
      const SCEV *NewStride = *NewStrideIter;

      if (SE.getTypeSizeInBits(OldStride->getType()) !=
          SE.getTypeSizeInBits(NewStride->getType())) {
        if (SE.getTypeSizeInBits(OldStride->getType()) >
            SE.getTypeSizeInBits(NewStride->getType()))
          NewStride = SE.getSignExtendExpr(NewStride, OldStride->getType());
        else
          OldStride = SE.getSignExtendExpr(OldStride, NewStride->getType());
      }
      if (const SCEVConstant *Factor =
            dyn_cast_or_null<SCEVConstant>(getExactSDiv(NewStride, OldStride,
                                                        SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      } else if (const SCEVConstant *Factor =
                   dyn_cast_or_null<SCEVConstant>(getExactSDiv(OldStride,
                                                               NewStride,
                                                               SE, true))) {
        if (Factor->getValue()->getValue().getMinSignedBits() <= 64)
          Factors.insert(Factor->getValue()->getValue().getSExtValue());
      }
    }

  // If all uses use the same type, don't bother looking for truncation-based
  // reuse.
  if (Types.size() == 1)
    Types.clear();

  DEBUG(print_factors_and_types(dbgs()));
}
void LSRInstance::CollectFixupsAndInitialFormulae() {
  for (IVUsers::const_iterator UI = IU.begin(), E = IU.end(); UI != E; ++UI) {
    // Record the uses.
    LSRFixup &LF = getNewFixup();
    LF.UserInst = UI->getUser();
    LF.OperandValToReplace = UI->getOperandValToReplace();
    LF.PostIncLoops = UI->getPostIncLoops();

    LSRUse::KindType Kind = LSRUse::Basic;
    Type *AccessTy = 0;
    if (isAddressUse(LF.UserInst, LF.OperandValToReplace)) {
      Kind = LSRUse::Address;
      AccessTy = getAccessType(LF.UserInst);
    }

    const SCEV *S = IU.getExpr(*UI);

    // Equality (== and !=) ICmps are special. We can rewrite (i == N) as
    // (N - i == 0), and this allows (N - i) to be the expression that we work
    // with rather than just N or i, so we can consider the register
    // requirements for both N and i at the same time. Limiting this code to
    // equality icmps is not a problem because all interesting loops use
    // equality icmps, thanks to IndVarSimplify.
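    //
    // For example (illustrative): an exit test
    //   %c = icmp eq i32 %i, %n
    // is handled as if it were ((%n - %i) == 0), so the single expression
    // (%n - %i) can be strength-reduced instead of tracking %i and %n
    // separately.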
    if (ICmpInst *CI = dyn_cast<ICmpInst>(LF.UserInst))
      if (CI->isEquality()) {
        // Swap the operands if needed to put the OperandValToReplace on the
        // left, for consistency.
        Value *NV = CI->getOperand(1);
        if (NV == LF.OperandValToReplace) {
          CI->setOperand(1, CI->getOperand(0));
          CI->setOperand(0, NV);
          NV = CI->getOperand(1);
          Changed = true;
        }

        // x == y  -->  x - y == 0
        const SCEV *N = SE.getSCEV(NV);
        if (SE.isLoopInvariant(N, L)) {
          // S is normalized, so normalize N before folding it into S
          // to keep the result normalized.
          N = TransformForPostIncUse(Normalize, N, CI, 0,
                                     LF.PostIncLoops, SE, DT);
          Kind = LSRUse::ICmpZero;
          S = SE.getMinusSCEV(N, S);
        }

        // -1 and the negations of all interesting strides (except the negation
        // of -1) are now also interesting.
        for (size_t i = 0, e = Factors.size(); i != e; ++i)
          if (Factors[i] != -1)
            Factors.insert(-(uint64_t)Factors[i]);
        Factors.insert(-1);
      }

    // Set up the initial formula for this use.
    std::pair<size_t, int64_t> P = getUse(S, Kind, AccessTy);
    LF.LUIdx = P.first;
    LF.Offset = P.second;
    LSRUse &LU = Uses[LF.LUIdx];
    LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
    if (!LU.WidestFixupType ||
        SE.getTypeSizeInBits(LU.WidestFixupType) <
        SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
      LU.WidestFixupType = LF.OperandValToReplace->getType();

    // If this is the first use of this LSRUse, give it a formula.
    if (LU.Formulae.empty()) {
      InsertInitialFormula(S, LU, LF.LUIdx);
      CountRegisters(LU.Formulae.back(), LF.LUIdx);
    }
  }

  DEBUG(print_fixups(dbgs()));
}
/// InsertInitialFormula - Insert a formula for the given expression into
/// the given use, separating out loop-variant portions from loop-invariant
/// and loop-computable portions.
void
LSRInstance::InsertInitialFormula(const SCEV *S, LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.InitialMatch(S, L, SE);
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Initial formula already exists!"); (void)Inserted;
}
/// InsertSupplementalFormula - Insert a simple single-register formula for
/// the given expression into the given use.
void
LSRInstance::InsertSupplementalFormula(const SCEV *S,
                                       LSRUse &LU, size_t LUIdx) {
  Formula F;
  F.BaseRegs.push_back(S);
  F.AM.HasBaseReg = true;
  bool Inserted = InsertFormula(LU, LUIdx, F);
  assert(Inserted && "Supplemental formula already exists!"); (void)Inserted;
}
/// CountRegisters - Note which registers are used by the given formula,
/// updating RegUses.
void LSRInstance::CountRegisters(const Formula &F, size_t LUIdx) {
  if (F.ScaledReg)
    RegUses.CountRegister(F.ScaledReg, LUIdx);
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I)
    RegUses.CountRegister(*I, LUIdx);
}
/// InsertFormula - If the given formula has not yet been inserted, add it to
/// the list, and return true. Return false otherwise.
bool LSRInstance::InsertFormula(LSRUse &LU, unsigned LUIdx, const Formula &F) {
  if (!LU.InsertFormula(F))
    return false;

  CountRegisters(F, LUIdx);
  return true;
}
/// CollectLoopInvariantFixupsAndFormulae - Check for other uses of
/// loop-invariant values which we're tracking. These other uses will pin these
/// values in registers, making them less profitable for elimination.
/// TODO: This currently misses non-constant addrec step registers.
/// TODO: Should this give more weight to users inside the loop?
void
LSRInstance::CollectLoopInvariantFixupsAndFormulae() {
  SmallVector<const SCEV *, 8> Worklist(RegUses.begin(), RegUses.end());
  SmallPtrSet<const SCEV *, 8> Inserted;

  while (!Worklist.empty()) {
    const SCEV *S = Worklist.pop_back_val();

    if (const SCEVNAryExpr *N = dyn_cast<SCEVNAryExpr>(S))
      Worklist.append(N->op_begin(), N->op_end());
    else if (const SCEVCastExpr *C = dyn_cast<SCEVCastExpr>(S))
      Worklist.push_back(C->getOperand());
    else if (const SCEVUDivExpr *D = dyn_cast<SCEVUDivExpr>(S)) {
      Worklist.push_back(D->getLHS());
      Worklist.push_back(D->getRHS());
    } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(S)) {
      if (!Inserted.insert(U)) continue;
      const Value *V = U->getValue();
      if (const Instruction *Inst = dyn_cast<Instruction>(V)) {
        // Look for instructions defined outside the loop.
        if (L->contains(Inst)) continue;
      } else if (isa<UndefValue>(V))
        // Undef doesn't have a live range, so it doesn't matter.
        continue;
      for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
           UI != UE; ++UI) {
        const Instruction *UserInst = dyn_cast<Instruction>(*UI);
        // Ignore non-instructions.
        if (!UserInst)
          continue;
        // Ignore instructions in other functions (as can happen with
        // Constants).
        if (UserInst->getParent()->getParent() != L->getHeader()->getParent())
          continue;
        // Ignore instructions not dominated by the loop.
        const BasicBlock *UseBB = !isa<PHINode>(UserInst) ?
          UserInst->getParent() :
          cast<PHINode>(UserInst)->getIncomingBlock(
            PHINode::getIncomingValueNumForOperand(UI.getOperandNo()));
        if (!DT.dominates(L->getHeader(), UseBB))
          continue;
        // Ignore uses which are part of other SCEV expressions, to avoid
        // analyzing them multiple times.
        if (SE.isSCEVable(UserInst->getType())) {
          const SCEV *UserS = SE.getSCEV(const_cast<Instruction *>(UserInst));
          // If the user is a no-op, look through to its uses.
          if (!isa<SCEVUnknown>(UserS))
            continue;
          if (UserS == U) {
            Worklist.push_back(
              SE.getUnknown(const_cast<Instruction *>(UserInst)));
            continue;
          }
        }
        // Ignore icmp instructions which are already being analyzed.
        if (const ICmpInst *ICI = dyn_cast<ICmpInst>(UserInst)) {
          unsigned OtherIdx = !UI.getOperandNo();
          Value *OtherOp = const_cast<Value *>(ICI->getOperand(OtherIdx));
          if (SE.hasComputableLoopEvolution(SE.getSCEV(OtherOp), L))
            continue;
        }

        LSRFixup &LF = getNewFixup();
        LF.UserInst = const_cast<Instruction *>(UserInst);
        LF.OperandValToReplace = UI.getUse();
        std::pair<size_t, int64_t> P = getUse(S, LSRUse::Basic, 0);
        LF.LUIdx = P.first;
        LF.Offset = P.second;
        LSRUse &LU = Uses[LF.LUIdx];
        LU.AllFixupsOutsideLoop &= LF.isUseFullyOutsideLoop(L);
        if (!LU.WidestFixupType ||
            SE.getTypeSizeInBits(LU.WidestFixupType) <
            SE.getTypeSizeInBits(LF.OperandValToReplace->getType()))
          LU.WidestFixupType = LF.OperandValToReplace->getType();
        InsertSupplementalFormula(U, LU, LF.LUIdx);
        CountRegisters(LU.Formulae.back(), Uses.size() - 1);
        break;
      }
    }
  }
}
/// CollectSubexprs - Split S into subexpressions which can be pulled out into
/// separate registers. If C is non-null, multiply each subexpression by C.
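///
/// For example (illustrative): (A + B + {0,+,4}<%L>) may be split into the
/// subexpressions A, B, and {0,+,4}<%L>, each a candidate for its own
/// register.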
static void CollectSubexprs(const SCEV *S, const SCEVConstant *C,
                            SmallVectorImpl<const SCEV *> &Ops,
                            const Loop *L,
                            ScalarEvolution &SE) {
  if (const SCEVAddExpr *Add = dyn_cast<SCEVAddExpr>(S)) {
    // Break out add operands.
    for (SCEVAddExpr::op_iterator I = Add->op_begin(), E = Add->op_end();
         I != E; ++I)
      CollectSubexprs(*I, C, Ops, L, SE);
    return;
  } else if (const SCEVAddRecExpr *AR = dyn_cast<SCEVAddRecExpr>(S)) {
    // Split a non-zero base out of an addrec.
    if (!AR->getStart()->isZero()) {
      CollectSubexprs(SE.getAddRecExpr(SE.getConstant(AR->getType(), 0),
                                       AR->getStepRecurrence(SE),
                                       AR->getLoop(),
                                       //FIXME: AR->getNoWrapFlags(SCEV::FlagNW)
                                       SCEV::FlagAnyWrap),
                      C, Ops, L, SE);
      CollectSubexprs(AR->getStart(), C, Ops, L, SE);
      return;
    }
  } else if (const SCEVMulExpr *Mul = dyn_cast<SCEVMulExpr>(S)) {
    // Break (C * (a + b + c)) into C*a + C*b + C*c.
    if (Mul->getNumOperands() == 2)
      if (const SCEVConstant *Op0 =
            dyn_cast<SCEVConstant>(Mul->getOperand(0))) {
        CollectSubexprs(Mul->getOperand(1),
                        C ? cast<SCEVConstant>(SE.getMulExpr(C, Op0)) : Op0,
                        Ops, L, SE);
        return;
      }
  }

  // Otherwise use the value itself, optionally with a scale applied.
  Ops.push_back(C ? SE.getMulExpr(C, S) : S);
}
/// GenerateReassociations - Split out subexpressions from adds and the bases of
/// addrecs.
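///
/// For example (a sketch): a formula with base register
/// (A + B + {0,+,1}<%L>) can be rewritten to use the base registers
/// (A + B) and {0,+,1}<%L>, exposing (A + B) for reuse by other uses.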
void LSRInstance::GenerateReassociations(LSRUse &LU, unsigned LUIdx,
                                         Formula Base,
                                         unsigned Depth) {
  // Arbitrarily cap recursion to protect compile time.
  if (Depth >= 3) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *BaseReg = Base.BaseRegs[i];

    SmallVector<const SCEV *, 8> AddOps;
    CollectSubexprs(BaseReg, 0, AddOps, L, SE);

    if (AddOps.size() == 1) continue;

    for (SmallVectorImpl<const SCEV *>::const_iterator J = AddOps.begin(),
         JE = AddOps.end(); J != JE; ++J) {

      // Loop-variant "unknown" values are uninteresting; we won't be able to
      // do anything meaningful with them.
      if (isa<SCEVUnknown>(*J) && !SE.isLoopInvariant(*J, L))
        continue;

      // Don't pull a constant into a register if the constant could be folded
      // into an immediate field.
      if (isAlwaysFoldable(*J, LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      // Collect all operands except *J.
      SmallVector<const SCEV *, 8> InnerAddOps
        (((const SmallVector<const SCEV *, 8> &)AddOps).begin(), J);
      InnerAddOps.append
        (llvm::next(J), ((const SmallVector<const SCEV *, 8> &)AddOps).end());

      // Don't leave just a constant behind in a register if the constant could
      // be folded into an immediate field.
      if (InnerAddOps.size() == 1 &&
          isAlwaysFoldable(InnerAddOps[0], LU.MinOffset, LU.MaxOffset,
                           Base.getNumRegs() > 1,
                           LU.Kind, LU.AccessTy, TLI, SE))
        continue;

      const SCEV *InnerSum = SE.getAddExpr(InnerAddOps);
      if (InnerSum->isZero())
        continue;
      Formula F = Base;

      // Add the remaining pieces of the add back into the new formula.
      const SCEVConstant *InnerSumSC = dyn_cast<SCEVConstant>(InnerSum);
      if (TLI && InnerSumSC &&
          SE.getTypeSizeInBits(InnerSumSC->getType()) <= 64 &&
          TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                   InnerSumSC->getValue()->getZExtValue())) {
        F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
                           InnerSumSC->getValue()->getZExtValue();
        F.BaseRegs.erase(F.BaseRegs.begin() + i);
      } else
        F.BaseRegs[i] = InnerSum;

      // Add J as its own register, or an unfolded immediate.
      const SCEVConstant *SC = dyn_cast<SCEVConstant>(*J);
      if (TLI && SC && SE.getTypeSizeInBits(SC->getType()) <= 64 &&
          TLI->isLegalAddImmediate((uint64_t)F.UnfoldedOffset +
                                   SC->getValue()->getZExtValue()))
        F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset +
                           SC->getValue()->getZExtValue();
      else
        F.BaseRegs.push_back(*J);

      if (InsertFormula(LU, LUIdx, F))
        // If that formula hadn't been seen before, recurse to find more like
        // it.
        GenerateReassociations(LU, LUIdx, LU.Formulae.back(), Depth+1);
    }
  }
}
/// GenerateCombinations - Generate a formula consisting of all of the
/// loop-dominating registers added into a single register.
void LSRInstance::GenerateCombinations(LSRUse &LU, unsigned LUIdx,
                                       Formula Base) {
  // This method is only interesting on a plurality of registers.
  if (Base.BaseRegs.size() <= 1) return;

  Formula F = Base;
  F.BaseRegs.clear();
  SmallVector<const SCEV *, 4> Ops;
  for (SmallVectorImpl<const SCEV *>::const_iterator
       I = Base.BaseRegs.begin(), E = Base.BaseRegs.end(); I != E; ++I) {
    const SCEV *BaseReg = *I;
    if (SE.properlyDominates(BaseReg, L->getHeader()) &&
        !SE.hasComputableLoopEvolution(BaseReg, L))
      Ops.push_back(BaseReg);
    else
      F.BaseRegs.push_back(BaseReg);
  }
  if (Ops.size() > 1) {
    const SCEV *Sum = SE.getAddExpr(Ops);
    // TODO: If Sum is zero, it probably means ScalarEvolution missed an
    // opportunity to fold something. For now, just ignore such cases
    // rather than proceed with zero in a register.
    if (!Sum->isZero()) {
      F.BaseRegs.push_back(Sum);
      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}
/// GenerateSymbolicOffsets - Generate reuse formulae using symbolic offsets.
void LSRInstance::GenerateSymbolicOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // We can't add a symbolic offset if the address already contains one.
  if (Base.AM.BaseGV) return;

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];
    GlobalValue *GV = ExtractSymbol(G, SE);
    if (G->isZero() || !GV)
      continue;
    Formula F = Base;
    F.AM.BaseGV = GV;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}
/// GenerateConstantOffsets - Generate reuse formulae using constant offsets.
void LSRInstance::GenerateConstantOffsets(LSRUse &LU, unsigned LUIdx,
                                          Formula Base) {
  // TODO: For now, just add the min and max offset, because it usually isn't
  // worthwhile looking at everything in between.
  SmallVector<int64_t, 2> Worklist;
  Worklist.push_back(LU.MinOffset);
  if (LU.MaxOffset != LU.MinOffset)
    Worklist.push_back(LU.MaxOffset);

  for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i) {
    const SCEV *G = Base.BaseRegs[i];

    for (SmallVectorImpl<int64_t>::const_iterator I = Worklist.begin(),
         E = Worklist.end(); I != E; ++I) {
      Formula F = Base;
      F.AM.BaseOffs = (uint64_t)Base.AM.BaseOffs - *I;
      if (isLegalUse(F.AM, LU.MinOffset - *I, LU.MaxOffset - *I,
                     LU.Kind, LU.AccessTy, TLI)) {
        // Add the offset to the base register.
        const SCEV *NewG = SE.getAddExpr(SE.getConstant(G->getType(), *I), G);
        // If it cancelled out, drop the base register, otherwise update it.
        if (NewG->isZero()) {
          std::swap(F.BaseRegs[i], F.BaseRegs.back());
          F.BaseRegs.pop_back();
        } else
          F.BaseRegs[i] = NewG;

        (void)InsertFormula(LU, LUIdx, F);
      }
    }

    int64_t Imm = ExtractImmediate(G, SE);
    if (G->isZero() || Imm == 0)
      continue;
    Formula F = Base;
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Imm;
    if (!isLegalUse(F.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI))
      continue;
    F.BaseRegs[i] = G;
    (void)InsertFormula(LU, LUIdx, F);
  }
}
/// GenerateICmpZeroScales - For ICmpZero, check to see if we can scale up
/// the comparison. For example, x == y -> x*c == y*c.
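///
/// This can pay off when the comparison's stride differs from that of the
/// other users: e.g. (illustrative) rewriting (i == n) as (i*4 == n*4)
/// lets the icmp share an existing {0,+,4} register rather than keeping a
/// separate {0,+,1} register alive.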
void LSRInstance::GenerateICmpZeroScales(LSRUse &LU, unsigned LUIdx,
                                         Formula Base) {
  if (LU.Kind != LSRUse::ICmpZero) return;

  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;
  if (SE.getTypeSizeInBits(IntTy) > 64) return;

  // Don't do this if there is more than one offset.
  if (LU.MinOffset != LU.MaxOffset) return;

  assert(!Base.AM.BaseGV && "ICmpZero use is not legal!");

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    // Check that the multiplication doesn't overflow.
    if (Base.AM.BaseOffs == INT64_MIN && Factor == -1)
      continue;
    int64_t NewBaseOffs = (uint64_t)Base.AM.BaseOffs * Factor;
    if (NewBaseOffs / Factor != Base.AM.BaseOffs)
      continue;

    // Check that multiplying with the use offset doesn't overflow.
    int64_t Offset = LU.MinOffset;
    if (Offset == INT64_MIN && Factor == -1)
      continue;
    Offset = (uint64_t)Offset * Factor;
    if (Offset / Factor != LU.MinOffset)
      continue;

    Formula F = Base;
    F.AM.BaseOffs = NewBaseOffs;

    // Check that this scale is legal.
    if (!isLegalUse(F.AM, Offset, Offset, LU.Kind, LU.AccessTy, TLI))
      continue;

    // Compensate for the use having MinOffset built into it.
    F.AM.BaseOffs = (uint64_t)F.AM.BaseOffs + Offset - LU.MinOffset;

    const SCEV *FactorS = SE.getConstant(IntTy, Factor);

    // Check that multiplying with each base register doesn't overflow.
    for (size_t i = 0, e = F.BaseRegs.size(); i != e; ++i) {
      F.BaseRegs[i] = SE.getMulExpr(F.BaseRegs[i], FactorS);
      if (getExactSDiv(F.BaseRegs[i], FactorS, SE) != Base.BaseRegs[i])
        goto next;
    }

    // Check that multiplying with the scaled register doesn't overflow.
    if (F.ScaledReg) {
      F.ScaledReg = SE.getMulExpr(F.ScaledReg, FactorS);
      if (getExactSDiv(F.ScaledReg, FactorS, SE) != Base.ScaledReg)
        continue;
    }

    // Check that multiplying with the unfolded offset doesn't overflow.
    if (F.UnfoldedOffset != 0) {
      if (F.UnfoldedOffset == INT64_MIN && Factor == -1)
        continue;
      F.UnfoldedOffset = (uint64_t)F.UnfoldedOffset * Factor;
      if (F.UnfoldedOffset / Factor != Base.UnfoldedOffset)
        continue;
    }

    // If we make it here and it's legal, add it.
    (void)InsertFormula(LU, LUIdx, F);
  next:;
  }
}
/// GenerateScales - Generate stride factor reuse formulae by making use of
/// scaled-offset address modes, for example.
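///
/// For example (illustrative): on a target with an addressing mode like
/// x86's [reg1 + reg2*4], a base register whose addrec has stride 4 can be
/// folded into the scale field instead of occupying another register.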
void LSRInstance::GenerateScales(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // Determine the integer type for the base formula.
  Type *IntTy = Base.getType();
  if (!IntTy) return;

  // If this Formula already has a scaled register, we can't add another one.
  if (Base.AM.Scale != 0) return;

  // Check each interesting stride.
  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    int64_t Factor = *I;

    Base.AM.Scale = Factor;
    Base.AM.HasBaseReg = Base.BaseRegs.size() > 1;
    // Check whether this scale is going to be legal.
    if (!isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                    LU.Kind, LU.AccessTy, TLI)) {
      // As a special-case, handle special out-of-loop Basic users specially.
      // TODO: Reconsider this special case.
      if (LU.Kind == LSRUse::Basic &&
          isLegalUse(Base.AM, LU.MinOffset, LU.MaxOffset,
                     LSRUse::Special, LU.AccessTy, TLI) &&
          LU.AllFixupsOutsideLoop)
        LU.Kind = LSRUse::Special;
      else
        continue;
    }
    // For an ICmpZero, negating a solitary base register won't lead to
    // new formulae.
    if (LU.Kind == LSRUse::ICmpZero &&
        !Base.AM.HasBaseReg && Base.AM.BaseOffs == 0 && !Base.AM.BaseGV)
      continue;
    // For each addrec base reg, apply the scale, if possible.
    for (size_t i = 0, e = Base.BaseRegs.size(); i != e; ++i)
      if (const SCEVAddRecExpr *AR =
            dyn_cast<SCEVAddRecExpr>(Base.BaseRegs[i])) {
        const SCEV *FactorS = SE.getConstant(IntTy, Factor);
        if (FactorS->isZero())
          continue;
        // Divide out the factor, ignoring high bits, since we'll be
        // scaling the value back up in the end.
        if (const SCEV *Quotient = getExactSDiv(AR, FactorS, SE, true)) {
          // TODO: This could be optimized to avoid all the copying.
          Formula F = Base;
          F.ScaledReg = Quotient;
          F.DeleteBaseReg(F.BaseRegs[i]);
          (void)InsertFormula(LU, LUIdx, F);
        }
      }
  }
}
/// GenerateTruncates - Generate reuse formulae from different IV types.
void LSRInstance::GenerateTruncates(LSRUse &LU, unsigned LUIdx, Formula Base) {
  // This requires TargetLowering to tell us which truncates are free.
  if (!TLI) return;

  // Don't bother truncating symbolic values.
  if (Base.AM.BaseGV) return;

  // Determine the integer type for the base formula.
  Type *DstTy = Base.getType();
  if (!DstTy) return;
  DstTy = SE.getEffectiveSCEVType(DstTy);

  for (SmallSetVector<Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    Type *SrcTy = *I;
    if (SrcTy != DstTy && TLI->isTruncateFree(SrcTy, DstTy)) {
      Formula F = Base;

      if (F.ScaledReg) F.ScaledReg = SE.getAnyExtendExpr(F.ScaledReg, *I);
      for (SmallVectorImpl<const SCEV *>::iterator J = F.BaseRegs.begin(),
           JE = F.BaseRegs.end(); J != JE; ++J)
        *J = SE.getAnyExtendExpr(*J, SrcTy);

      // TODO: This assumes we've done basic processing on all uses and
      // have an idea what the register usage is.
      if (!F.hasRegsUsedByUsesOtherThan(LUIdx, RegUses))
        continue;

      (void)InsertFormula(LU, LUIdx, F);
    }
  }
}

namespace {
/// WorkItem - Helper class for GenerateCrossUseConstantOffsets. It's used to
/// defer modifications so that the search phase doesn't have to worry about
/// the data structures moving underneath it.
struct WorkItem {
  size_t LUIdx;
  int64_t Imm;
  const SCEV *OrigReg;

  WorkItem(size_t LI, int64_t I, const SCEV *R)
    : LUIdx(LI), Imm(I), OrigReg(R) {}

  void print(raw_ostream &OS) const;
  void dump() const;
};

}

void WorkItem::print(raw_ostream &OS) const {
  OS << "in formulae referencing " << *OrigReg << " in use " << LUIdx
     << ", add offset " << Imm;
}

void WorkItem::dump() const {
  print(errs()); errs() << '\n';
}
/// GenerateCrossUseConstantOffsets - Look for registers which are a constant
/// distance apart and try to form reuse opportunities between them.
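///
/// For example (illustrative): if one use needs the register {A,+,4}<%L>
/// and another needs {A+8,+,4}<%L>, the two differ by the constant 8, so
/// the second can often be rewritten as the first plus an immediate offset
/// of 8, saving a register.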
void LSRInstance::GenerateCrossUseConstantOffsets() {
  // Group the registers by their value without any added constant offset.
  typedef std::map<int64_t, const SCEV *> ImmMapTy;
  typedef DenseMap<const SCEV *, ImmMapTy> RegMapTy;
  RegMapTy Map;
  DenseMap<const SCEV *, SmallBitVector> UsedByIndicesMap;
  SmallVector<const SCEV *, 8> Sequence;
  for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
       I != E; ++I) {
    const SCEV *Reg = *I;
    int64_t Imm = ExtractImmediate(Reg, SE);
    std::pair<RegMapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Reg, ImmMapTy()));
    if (Pair.second)
      Sequence.push_back(Reg);
    Pair.first->second.insert(std::make_pair(Imm, *I));
    UsedByIndicesMap[Reg] |= RegUses.getUsedByIndices(*I);
  }

  // Now examine each set of registers with the same base value. Build up
  // a list of work to do and do the work in a separate step so that we're
  // not adding formulae and register counts while we're searching.
  SmallVector<WorkItem, 32> WorkItems;
  SmallSet<std::pair<size_t, int64_t>, 32> UniqueItems;
  for (SmallVectorImpl<const SCEV *>::const_iterator I = Sequence.begin(),
       E = Sequence.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    const ImmMapTy &Imms = Map.find(Reg)->second;

    // It's not worthwhile looking for reuse if there's only one offset.
    if (Imms.size() == 1)
      continue;

    DEBUG(dbgs() << "Generating cross-use offsets for " << *Reg << ':';
          for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
               J != JE; ++J)
            dbgs() << ' ' << J->first;
          dbgs() << '\n');

    // Examine each offset.
    for (ImmMapTy::const_iterator J = Imms.begin(), JE = Imms.end();
         J != JE; ++J) {
      const SCEV *OrigReg = J->second;

      int64_t JImm = J->first;
      const SmallBitVector &UsedByIndices = RegUses.getUsedByIndices(OrigReg);

      if (!isa<SCEVConstant>(OrigReg) &&
          UsedByIndicesMap[Reg].count() == 1) {
        DEBUG(dbgs() << "Skipping cross-use reuse for " << *OrigReg << '\n');
        continue;
      }

      // Conservatively examine offsets between this orig reg and a few
      // selected other orig regs.
      ImmMapTy::const_iterator OtherImms[] = {
        Imms.begin(), prior(Imms.end()),
        Imms.lower_bound((Imms.begin()->first + prior(Imms.end())->first) / 2)
      };
      for (size_t i = 0, e = array_lengthof(OtherImms); i != e; ++i) {
        ImmMapTy::const_iterator M = OtherImms[i];
        if (M == J || M == JE) continue;

        // Compute the difference between the two.
        int64_t Imm = (uint64_t)JImm - M->first;
        for (int LUIdx = UsedByIndices.find_first(); LUIdx != -1;
             LUIdx = UsedByIndices.find_next(LUIdx))
          // Make a memo of this use, offset, and register tuple.
          if (UniqueItems.insert(std::make_pair(LUIdx, Imm)))
            WorkItems.push_back(WorkItem(LUIdx, Imm, OrigReg));
      }
    }
  }

  Map.clear();
  Sequence.clear();
  UsedByIndicesMap.clear();
  UniqueItems.clear();

  // Now iterate through the worklist and add new formulae.
  for (SmallVectorImpl<WorkItem>::const_iterator I = WorkItems.begin(),
       E = WorkItems.end(); I != E; ++I) {
    const WorkItem &WI = *I;
    size_t LUIdx = WI.LUIdx;
    LSRUse &LU = Uses[LUIdx];
    int64_t Imm = WI.Imm;
    const SCEV *OrigReg = WI.OrigReg;

    Type *IntTy = SE.getEffectiveSCEVType(OrigReg->getType());
    const SCEV *NegImmS = SE.getSCEV(ConstantInt::get(IntTy, -(uint64_t)Imm));
    unsigned BitWidth = SE.getTypeSizeInBits(IntTy);

    // TODO: Use a more targeted data structure.
    for (size_t L = 0, LE = LU.Formulae.size(); L != LE; ++L) {
      const Formula &F = LU.Formulae[L];
      // Use the immediate in the scaled register.
      if (F.ScaledReg == OrigReg) {
        int64_t Offs = (uint64_t)F.AM.BaseOffs +
                       Imm * (uint64_t)F.AM.Scale;
        // Don't create 50 + reg(-50).
        if (F.referencesReg(SE.getSCEV(
                   ConstantInt::get(IntTy, -(uint64_t)Offs))))
          continue;
        Formula NewF = F;
        NewF.AM.BaseOffs = Offs;
        if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI))
          continue;
        NewF.ScaledReg = SE.getAddExpr(NegImmS, NewF.ScaledReg);

        // If the new scale is a constant in a register, and adding the constant
        // value to the immediate would produce a value closer to zero than the
        // immediate itself, then the formula isn't worthwhile.
        if (const SCEVConstant *C = dyn_cast<SCEVConstant>(NewF.ScaledReg))
          if (C->getValue()->isNegative() !=
                (NewF.AM.BaseOffs < 0) &&
              (C->getValue()->getValue().abs() * APInt(BitWidth, F.AM.Scale))
                .ule(abs64(NewF.AM.BaseOffs)))
            continue;

        // OK, looks good.
        (void)InsertFormula(LU, LUIdx, NewF);
      } else {
        // Use the immediate in a base register.
        for (size_t N = 0, NE = F.BaseRegs.size(); N != NE; ++N) {
          const SCEV *BaseReg = F.BaseRegs[N];
          if (BaseReg != OrigReg)
            continue;
          Formula NewF = F;
          NewF.AM.BaseOffs = (uint64_t)NewF.AM.BaseOffs + Imm;
          if (!isLegalUse(NewF.AM, LU.MinOffset, LU.MaxOffset,
                          LU.Kind, LU.AccessTy, TLI)) {
            if (!TLI ||
                !TLI->isLegalAddImmediate((uint64_t)NewF.UnfoldedOffset + Imm))
              continue;
            NewF.UnfoldedOffset = (uint64_t)NewF.UnfoldedOffset + Imm;
          }
          NewF.BaseRegs[N] = SE.getAddExpr(NegImmS, BaseReg);

          // If the new formula has a constant in a register, and adding the
          // constant value to the immediate would produce a value closer to
          // zero than the immediate itself, then the formula isn't worthwhile.
          for (SmallVectorImpl<const SCEV *>::const_iterator
               J = NewF.BaseRegs.begin(), JE = NewF.BaseRegs.end();
               J != JE; ++J)
            if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*J))
              if ((C->getValue()->getValue() + NewF.AM.BaseOffs).abs().slt(
                   abs64(NewF.AM.BaseOffs)) &&
                  (C->getValue()->getValue() +
                   NewF.AM.BaseOffs).countTrailingZeros() >=
                   CountTrailingZeros_64(NewF.AM.BaseOffs))
                goto skip_formula;

          // Ok, looks good.
          (void)InsertFormula(LU, LUIdx, NewF);
        skip_formula:;
        }
      }
    }
  }
}
/// GenerateAllReuseFormulae - Generate formulae for each use.
void
LSRInstance::GenerateAllReuseFormulae() {
  // This is split into multiple loops so that hasRegsUsedByUsesOtherThan
  // queries are more precise.
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateReassociations(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateCombinations(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateSymbolicOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateConstantOffsets(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateICmpZeroScales(LU, LUIdx, LU.Formulae[i]);
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateScales(LU, LUIdx, LU.Formulae[i]);
  }
  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    for (size_t i = 0, f = LU.Formulae.size(); i != f; ++i)
      GenerateTruncates(LU, LUIdx, LU.Formulae[i]);
  }

  GenerateCrossUseConstantOffsets();

  DEBUG(dbgs() << "\n"
                  "After generating reuse formulae:\n";
        print_uses(dbgs()));
}
/// If there are multiple formulae with the same set of registers used
/// by other uses, pick the best one and delete the others.
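///
/// For example (a sketch): if two formulae expose the same register set
/// {A} to other uses and differ only in their private parts, only the
/// cheaper of the two can appear in a best solution, so the other is
/// deleted.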
void LSRInstance::FilterOutUndesirableDedicatedRegisters() {
  DenseSet<const SCEV *> VisitedRegs;
  SmallPtrSet<const SCEV *, 16> Regs;
  SmallPtrSet<const SCEV *, 16> LoserRegs;
#ifndef NDEBUG
  bool ChangedFormulae = false;
#endif

  // Collect the best formula for each unique set of shared registers. This
  // is reset for each use.
  typedef DenseMap<SmallVector<const SCEV *, 2>, size_t, UniquifierDenseMapInfo>
    BestFormulaeTy;
  BestFormulaeTy BestFormulae;

  for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
    LSRUse &LU = Uses[LUIdx];
    DEBUG(dbgs() << "Filtering for use "; LU.print(dbgs()); dbgs() << '\n');

    bool Any = false;
    for (size_t FIdx = 0, NumForms = LU.Formulae.size();
         FIdx != NumForms; ++FIdx) {
      Formula &F = LU.Formulae[FIdx];

      // Some formulas are instant losers. For example, they may depend on
      // nonexistent AddRecs from other loops. These need to be filtered
      // immediately, otherwise heuristics could choose them over others leading
      // to an unsatisfactory solution. Passing LoserRegs into RateFormula here
      // avoids the need to recompute this information across formulae using the
      // same bad AddRec. Passing LoserRegs is also essential unless we remove
      // the corresponding bad register from the Regs set.
      Cost CostF;
      Regs.clear();
      CostF.RateFormula(F, Regs, VisitedRegs, L, LU.Offsets, SE, DT,
                        &LoserRegs);
      if (CostF.isLoser()) {
        // During initial formula generation, undesirable formulae are generated
        // by uses within other loops that have some non-trivial address mode or
        // use the postinc form of the IV. LSR needs to provide these formulae
        // as the basis of rediscovering the desired formula that uses an AddRec
        // corresponding to the existing phi. Once all formulae have been
        // generated, these initial losers may be pruned.
        DEBUG(dbgs() << "  Filtering loser "; F.print(dbgs());
              dbgs() << "\n");
      }
      else {
        SmallVector<const SCEV *, 2> Key;
        for (SmallVectorImpl<const SCEV *>::const_iterator J = F.BaseRegs.begin(),
             JE = F.BaseRegs.end(); J != JE; ++J) {
          const SCEV *Reg = *J;
          if (RegUses.isRegUsedByUsesOtherThan(Reg, LUIdx))
            Key.push_back(Reg);
        }
        if (F.ScaledReg &&
            RegUses.isRegUsedByUsesOtherThan(F.ScaledReg, LUIdx))
          Key.push_back(F.ScaledReg);
        // Unstable sort by host order ok, because this is only used for
        // uniquifying.
        std::sort(Key.begin(), Key.end());

        std::pair<BestFormulaeTy::const_iterator, bool> P =
          BestFormulae.insert(std::make_pair(Key, FIdx));
        if (P.second)
          continue;

        Formula &Best = LU.Formulae[P.first->second];

        Cost CostBest;
        Regs.clear();
        CostBest.RateFormula(Best, Regs, VisitedRegs, L, LU.Offsets, SE, DT);
        if (CostF < CostBest)
          std::swap(F, Best);
        DEBUG(dbgs() << "  Filtering out formula "; F.print(dbgs());
              dbgs() << "\n"
                        "    in favor of formula "; Best.print(dbgs());
              dbgs() << '\n');
      }
#ifndef NDEBUG
      ChangedFormulae = true;
#endif
      LU.DeleteFormula(F);
      --FIdx;
      --NumForms;
      Any = true;
    }

    // Now that we've filtered out some formulae, recompute the Regs set.
    if (Any)
      LU.RecomputeRegs(LUIdx, RegUses);

    // Reset this to prepare for the next use.
    BestFormulae.clear();
  }

  DEBUG(if (ChangedFormulae) {
          dbgs() << "\n"
                    "After filtering out undesirable candidates:\n";
          print_uses(dbgs());
        });
}
// This is a rough guess that seems to work fairly well.
static const size_t ComplexityLimit = UINT16_MAX;

/// EstimateSearchSpaceComplexity - Estimate the worst-case number of
/// solutions the solver might have to consider. It almost never considers
/// this many solutions because it prunes the search space, but the pruning
/// isn't always sufficient.
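///
/// For example (illustrative arithmetic): with three uses having 10, 20,
/// and 40 formulae respectively, the worst case is 10 * 20 * 40 = 8000
/// candidate solutions, since the solver picks one formula per use.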
size_t LSRInstance::EstimateSearchSpaceComplexity() const {
  size_t Power = 1;
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    size_t FSize = I->Formulae.size();
    if (FSize >= ComplexityLimit) {
      Power = ComplexityLimit;
      break;
    }
    Power *= FSize;
    if (Power >= ComplexityLimit)
      break;
  }
  return Power;
}
/// NarrowSearchSpaceByDetectingSupersets - When one formula uses a superset
/// of the registers of another formula, it won't help reduce register
/// pressure (though it may not necessarily hurt register pressure); remove
/// it to simplify the system.
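///
/// For example (a sketch): a formula using register A plus the constant 5
/// held in its own register can never beat a formula that uses register A
/// alone with the 5 folded into its immediate field, so the former is
/// deleted.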
void LSRInstance::NarrowSearchSpaceByDetectingSupersets() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by eliminating formulae "
                    "which use a superset of registers used by other "
                    "formulae.\n");

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        // Look for a formula with a constant or GV in a register. If the use
        // also has a formula with that same value in an immediate field,
        // delete the one that uses a register.
        for (SmallVectorImpl<const SCEV *>::const_iterator
             I = F.BaseRegs.begin(), E = F.BaseRegs.end(); I != E; ++I) {
          if (const SCEVConstant *C = dyn_cast<SCEVConstant>(*I)) {
            Formula NewF = F;
            NewF.AM.BaseOffs += C->getValue()->getSExtValue();
            NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                (I - F.BaseRegs.begin()));
            if (LU.HasFormulaWithSameRegs(NewF)) {
              DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
              LU.DeleteFormula(F);
              --i;
              --e;
              Any = true;
              break;
            }
          } else if (const SCEVUnknown *U = dyn_cast<SCEVUnknown>(*I)) {
            if (GlobalValue *GV = dyn_cast<GlobalValue>(U->getValue()))
              if (!F.AM.BaseGV) {
                Formula NewF = F;
                NewF.AM.BaseGV = GV;
                NewF.BaseRegs.erase(NewF.BaseRegs.begin() +
                                    (I - F.BaseRegs.begin()));
                if (LU.HasFormulaWithSameRegs(NewF)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LU.DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                  break;
                }
              }
          }
        }
      }
      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}
/// NarrowSearchSpaceByCollapsingUnrolledCode - When there are many registers
/// for expressions like A, A+1, A+2, etc., allocate a single register for
/// them.
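///
/// For example (illustrative): a loop body unrolled by four typically
/// addresses A, A+1, A+2, and A+3 in each unrolled iteration; folding the
/// +1, +2, and +3 into immediate offsets lets all four share a single
/// register for A.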
void LSRInstance::NarrowSearchSpaceByCollapsingUnrolledCode() {
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by assuming that uses "
                    "separated by a constant offset will use the same "
                    "registers.\n");

    // This is especially useful for unrolled loops.

    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
           E = LU.Formulae.end(); I != E; ++I) {
        const Formula &F = *I;
        if (F.AM.BaseOffs != 0 && F.AM.Scale == 0) {
          if (LSRUse *LUThatHas = FindUseWithSimilarFormula(F, LU)) {
            if (reconcileNewOffset(*LUThatHas, F.AM.BaseOffs,
                                   /*HasBaseReg=*/false,
                                   LU.Kind, LU.AccessTy)) {
              DEBUG(dbgs() << "  Deleting use "; LU.print(dbgs());
                    dbgs() << '\n');

              LUThatHas->AllFixupsOutsideLoop &= LU.AllFixupsOutsideLoop;

              // Update the relocs to reference the new use.
              for (SmallVectorImpl<LSRFixup>::iterator I = Fixups.begin(),
                   E = Fixups.end(); I != E; ++I) {
                LSRFixup &Fixup = *I;
                if (Fixup.LUIdx == LUIdx) {
                  Fixup.LUIdx = LUThatHas - &Uses.front();
                  Fixup.Offset += F.AM.BaseOffs;
                  // Add the new offset to LUThatHas' offset list.
                  if (LUThatHas->Offsets.back() != Fixup.Offset) {
                    LUThatHas->Offsets.push_back(Fixup.Offset);
                    if (Fixup.Offset > LUThatHas->MaxOffset)
                      LUThatHas->MaxOffset = Fixup.Offset;
                    if (Fixup.Offset < LUThatHas->MinOffset)
                      LUThatHas->MinOffset = Fixup.Offset;
                  }
                  DEBUG(dbgs() << "New fixup has offset "
                               << Fixup.Offset << '\n');
                }
                if (Fixup.LUIdx == NumUses-1)
                  Fixup.LUIdx = LUIdx;
              }

              // Delete formulae from the new use which are no longer legal.
              bool Any = false;
              for (size_t i = 0, e = LUThatHas->Formulae.size(); i != e; ++i) {
                Formula &F = LUThatHas->Formulae[i];
                if (!isLegalUse(F.AM,
                                LUThatHas->MinOffset, LUThatHas->MaxOffset,
                                LUThatHas->Kind, LUThatHas->AccessTy, TLI)) {
                  DEBUG(dbgs() << "  Deleting "; F.print(dbgs());
                        dbgs() << '\n');
                  LUThatHas->DeleteFormula(F);
                  --i;
                  --e;
                  Any = true;
                }
              }
              if (Any)
                LUThatHas->RecomputeRegs(LUThatHas - &Uses.front(), RegUses);

              // Delete the old use.
              DeleteUse(LU, LUIdx);
              --LUIdx;
              --NumUses;
              break;
            }
          }
        }
      }
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}
/// NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters - Call
/// FilterOutUndesirableDedicatedRegisters again, if necessary, now that
/// we've done more filtering, as it may be able to find more formulae to
/// prune.
void LSRInstance::NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters(){
  if (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    DEBUG(dbgs() << "The search space is too complex.\n");

    DEBUG(dbgs() << "Narrowing the search space by re-filtering out "
                    "undesirable dedicated registers.\n");

    FilterOutUndesirableDedicatedRegisters();

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}
/// NarrowSearchSpaceByPickingWinnerRegs - Pick a register which seems likely
/// to be profitable, and then in any use which has any reference to that
/// register, delete all formulae which do not reference that register.
void LSRInstance::NarrowSearchSpaceByPickingWinnerRegs() {
  // With all other options exhausted, loop until the system is simple
  // enough to handle.
  SmallPtrSet<const SCEV *, 4> Taken;
  while (EstimateSearchSpaceComplexity() >= ComplexityLimit) {
    // Ok, we have too many formulae on our hands to conveniently handle.
    // Use a rough heuristic to thin out the list.
    DEBUG(dbgs() << "The search space is too complex.\n");

    // Pick the register which is used by the most LSRUses, which is likely
    // to be a good reuse register candidate.
    const SCEV *Best = 0;
    unsigned BestNum = 0;
    for (RegUseTracker::const_iterator I = RegUses.begin(), E = RegUses.end();
         I != E; ++I) {
      const SCEV *Reg = *I;
      if (Taken.count(Reg))
        continue;
      if (!Best)
        Best = Reg;
      else {
        unsigned Count = RegUses.getUsedByIndices(Reg).count();
        if (Count > BestNum) {
          Best = Reg;
          BestNum = Count;
        }
      }
    }

    DEBUG(dbgs() << "Narrowing the search space by assuming " << *Best
                 << " will yield profitable reuse.\n");
    Taken.insert(Best);

    // In any use with formulae which references this register, delete formulae
    // which don't reference it.
    for (size_t LUIdx = 0, NumUses = Uses.size(); LUIdx != NumUses; ++LUIdx) {
      LSRUse &LU = Uses[LUIdx];
      if (!LU.Regs.count(Best)) continue;

      bool Any = false;
      for (size_t i = 0, e = LU.Formulae.size(); i != e; ++i) {
        Formula &F = LU.Formulae[i];
        if (!F.referencesReg(Best)) {
          DEBUG(dbgs() << "  Deleting "; F.print(dbgs()); dbgs() << '\n');
          LU.DeleteFormula(F);
          --e;
          --i;
          Any = true;
          assert(e != 0 && "Use has no formulae left! Is Regs inconsistent?");
          continue;
        }
      }

      if (Any)
        LU.RecomputeRegs(LUIdx, RegUses);
    }

    DEBUG(dbgs() << "After pre-selection:\n";
          print_uses(dbgs()));
  }
}
/// NarrowSearchSpaceUsingHeuristics - If there are an extraordinary number of
/// formulae to choose from, use some rough heuristics to prune down the number
/// of formulae. This keeps the main solver from taking an extraordinary amount
/// of time in some worst-case scenarios.
void LSRInstance::NarrowSearchSpaceUsingHeuristics() {
  NarrowSearchSpaceByDetectingSupersets();
  NarrowSearchSpaceByCollapsingUnrolledCode();
  NarrowSearchSpaceByRefilteringUndesirableDedicatedRegisters();
  NarrowSearchSpaceByPickingWinnerRegs();
}
/// SolveRecurse - This is the recursive solver.
void LSRInstance::SolveRecurse(SmallVectorImpl<const Formula *> &Solution,
                               Cost &SolutionCost,
                               SmallVectorImpl<const Formula *> &Workspace,
                               const Cost &CurCost,
                               const SmallPtrSet<const SCEV *, 16> &CurRegs,
                               DenseSet<const SCEV *> &VisitedRegs) const {
  // Some ideas:
  //  - prune more:
  //    - use more aggressive filtering
  //    - sort the formulae so that the most profitable solutions are found first
  //    - sort the uses too
  //  - search faster:
  //    - don't compute a cost, and then compare. compare while computing a cost
  //      and bail early.
  //    - track register sets with SmallBitVector

  const LSRUse &LU = Uses[Workspace.size()];

  // If this use references any register that's already a part of the
  // in-progress solution, consider it a requirement that a formula must
  // reference that register in order to be considered. This prunes out
  // unprofitable searching.
  SmallSetVector<const SCEV *, 4> ReqRegs;
  for (SmallPtrSet<const SCEV *, 16>::const_iterator I = CurRegs.begin(),
       E = CurRegs.end(); I != E; ++I)
    if (LU.Regs.count(*I))
      ReqRegs.insert(*I);

  bool AnySatisfiedReqRegs = false;
  SmallPtrSet<const SCEV *, 16> NewRegs;
  Cost NewCost;
retry:
  for (SmallVectorImpl<Formula>::const_iterator I = LU.Formulae.begin(),
       E = LU.Formulae.end(); I != E; ++I) {
    const Formula &F = *I;

    // Ignore formulae which do not use any of the required registers.
    for (SmallSetVector<const SCEV *, 4>::const_iterator J = ReqRegs.begin(),
         JE = ReqRegs.end(); J != JE; ++J) {
      const SCEV *Reg = *J;
      if ((!F.ScaledReg || F.ScaledReg != Reg) &&
          std::find(F.BaseRegs.begin(), F.BaseRegs.end(), Reg) ==
          F.BaseRegs.end())
        goto skip;
    }
    AnySatisfiedReqRegs = true;

    // Evaluate the cost of the current formula. If it's already worse than
    // the current best, prune the search at that point.
    NewCost = CurCost;
    NewRegs = CurRegs;
    NewCost.RateFormula(F, NewRegs, VisitedRegs, L, LU.Offsets, SE, DT);
    if (NewCost < SolutionCost) {
      Workspace.push_back(&F);
      if (Workspace.size() != Uses.size()) {
        SolveRecurse(Solution, SolutionCost, Workspace, NewCost,
                     NewRegs, VisitedRegs);
        if (F.getNumRegs() == 1 && Workspace.size() == 1)
          VisitedRegs.insert(F.ScaledReg ? F.ScaledReg : F.BaseRegs[0]);
      } else {
        DEBUG(dbgs() << "New best at "; NewCost.print(dbgs());
              dbgs() << ". Regs:";
              for (SmallPtrSet<const SCEV *, 16>::const_iterator
                   I = NewRegs.begin(), E = NewRegs.end(); I != E; ++I)
                dbgs() << ' ' << **I;
              dbgs() << '\n');

        SolutionCost = NewCost;
        Solution = Workspace;
      }
      Workspace.pop_back();
    }
  skip:;
  }

  if (!EnableRetry && !AnySatisfiedReqRegs)
    return;

  // If none of the formulae had all of the required registers, relax the
  // constraint so that we don't exclude all formulae.
  if (!AnySatisfiedReqRegs) {
    assert(!ReqRegs.empty() && "Solver failed even without required registers");
    ReqRegs.clear();
    goto retry;
  }
}
/// Solve - Choose one formula from each use. Return the results in the given
/// Solution vector.
void LSRInstance::Solve(SmallVectorImpl<const Formula *> &Solution) const {
  SmallVector<const Formula *, 8> Workspace;
  Cost SolutionCost;
  SolutionCost.Loose();
  Cost CurCost;
  SmallPtrSet<const SCEV *, 16> CurRegs;
  DenseSet<const SCEV *> VisitedRegs;
  Workspace.reserve(Uses.size());

  // SolveRecurse does all the work.
  SolveRecurse(Solution, SolutionCost, Workspace, CurCost,
               CurRegs, VisitedRegs);
  if (Solution.empty()) {
    DEBUG(dbgs() << "\nNo Satisfactory Solution\n");
    return;
  }

  // Ok, we've now made all our decisions.
  DEBUG(dbgs() << "\n"
                  "The chosen solution requires "; SolutionCost.print(dbgs());
        dbgs() << ":\n";
        for (size_t i = 0, e = Uses.size(); i != e; ++i) {
          dbgs() << "  ";
          Uses[i].print(dbgs());
          dbgs() << "\n"
                    "    ";
          Solution[i]->print(dbgs());
          dbgs() << '\n';
        });

  assert(Solution.size() == Uses.size() && "Malformed solution!");
}
/// HoistInsertPosition - Helper for AdjustInsertPositionForExpand. Climb up
/// the dominator tree as far as we can go while still being dominated by the
/// input positions. This helps canonicalize the insert position, which
/// encourages sharing.
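///
/// For example (illustrative): if expansions in two sibling blocks both
/// need (A + B), hoisting the insert position to their common dominator
/// lets SCEVExpander emit a single add that serves both.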
BasicBlock::iterator
LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
                                 const SmallVectorImpl<Instruction *> &Inputs)
                                 const {
  for (;;) {
    const Loop *IPLoop = LI.getLoopFor(IP->getParent());
    unsigned IPLoopDepth = IPLoop ? IPLoop->getLoopDepth() : 0;

    BasicBlock *IDom;
    for (DomTreeNode *Rung = DT.getNode(IP->getParent()); ; ) {
      if (!Rung) return IP;
      Rung = Rung->getIDom();
      if (!Rung) return IP;
      IDom = Rung->getBlock();

      // Don't climb into a loop though.
      const Loop *IDomLoop = LI.getLoopFor(IDom);
      unsigned IDomDepth = IDomLoop ? IDomLoop->getLoopDepth() : 0;
      if (IDomDepth <= IPLoopDepth &&
          (IDomDepth != IPLoopDepth || IDomLoop == IPLoop))
        break;
    }

    bool AllDominate = true;
    Instruction *BetterPos = 0;
    Instruction *Tentative = IDom->getTerminator();
    for (SmallVectorImpl<Instruction *>::const_iterator I = Inputs.begin(),
         E = Inputs.end(); I != E; ++I) {
      Instruction *Inst = *I;
      if (Inst == Tentative || !DT.dominates(Inst, Tentative)) {
        AllDominate = false;
        break;
      }
      // Attempt to find an insert position in the middle of the block,
      // instead of at the end, so that it can be used for other expansions.
      if (IDom == Inst->getParent() &&
          (!BetterPos || DT.dominates(BetterPos, Inst)))
        BetterPos = llvm::next(BasicBlock::iterator(Inst));
    }
    if (!AllDominate)
      break;
    if (BetterPos)
      IP = BetterPos;
    else
      IP = Tentative;
  }

  return IP;
}
/// AdjustInsertPositionForExpand - Determine an input position which will be
/// dominated by the operands and which will dominate the result.
BasicBlock::iterator
LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator IP,
                                           const LSRFixup &LF,
                                           const LSRUse &LU) const {
  // Collect some instructions which must be dominated by the
  // expanding replacement. These must be dominated by any operands that
  // will be required in the expansion.
  SmallVector<Instruction *, 4> Inputs;
  if (Instruction *I = dyn_cast<Instruction>(LF.OperandValToReplace))
    Inputs.push_back(I);
  if (LU.Kind == LSRUse::ICmpZero)
    if (Instruction *I =
          dyn_cast<Instruction>(cast<ICmpInst>(LF.UserInst)->getOperand(1)))
      Inputs.push_back(I);
  if (LF.PostIncLoops.count(L)) {
    if (LF.isUseFullyOutsideLoop(L))
      Inputs.push_back(L->getLoopLatch()->getTerminator());
    else
      Inputs.push_back(IVIncInsertPos);
  }
  // The expansion must also be dominated by the increment positions of any
  // loops for which it is using post-inc mode.
  for (PostIncLoopSet::const_iterator I = LF.PostIncLoops.begin(),
       E = LF.PostIncLoops.end(); I != E; ++I) {
    const Loop *PIL = *I;
    if (PIL == L) continue;

    // Be dominated by the loop exit.
    SmallVector<BasicBlock *, 4> ExitingBlocks;
    PIL->getExitingBlocks(ExitingBlocks);
    if (!ExitingBlocks.empty()) {
      BasicBlock *BB = ExitingBlocks[0];
      for (unsigned i = 1, e = ExitingBlocks.size(); i != e; ++i)
        BB = DT.findNearestCommonDominator(BB, ExitingBlocks[i]);
      Inputs.push_back(BB->getTerminator());
    }
  }

  // Then, climb up the immediate dominator tree as far as we can go while
  // still being dominated by the input positions.
  IP = HoistInsertPosition(IP, Inputs);

  // Don't insert instructions before PHI nodes.
  while (isa<PHINode>(IP)) ++IP;

  // Ignore landingpad instructions.
  while (isa<LandingPadInst>(IP)) ++IP;

  // Ignore debug intrinsics.
  while (isa<DbgInfoIntrinsic>(IP)) ++IP;

  return IP;
}
/// Expand - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding").
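/// For example, a formula such as 16 + reg({0,+,4}<%L>) + reg(%p) is
/// emitted, roughly, as
///   %lsr.iv = phi ...            ; the {0,+,4}<%L> addrec
///   %tmp = add i64 %p, %lsr.iv
///   %tmp2 = add i64 %tmp, 16
/// (illustrative names; the actual instructions are chosen by
/// SCEVExpander).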
Value *LSRInstance::Expand(const LSRFixup &LF,
                           const Formula &F,
                           BasicBlock::iterator IP,
                           SCEVExpander &Rewriter,
                           SmallVectorImpl<WeakVH> &DeadInsts) const {
  const LSRUse &LU = Uses[LF.LUIdx];

  // Determine an input position which will be dominated by the operands and
  // which will dominate the result.
  IP = AdjustInsertPositionForExpand(IP, LF, LU);

  // Inform the Rewriter if we have a post-increment use, so that it can
  // perform an advantageous expansion.
  Rewriter.setPostInc(LF.PostIncLoops);

  // This is the type that the user actually needs.
  Type *OpTy = LF.OperandValToReplace->getType();
  // This will be the type that we'll initially expand to.
  Type *Ty = F.getType();
  if (!Ty)
    // No type known; just expand directly to the ultimate type.
    Ty = OpTy;
  else if (SE.getEffectiveSCEVType(Ty) == SE.getEffectiveSCEVType(OpTy))
    // Expand directly to the ultimate type if it's the right size.
    Ty = OpTy;
  // This is the type to do integer arithmetic in.
  Type *IntTy = SE.getEffectiveSCEVType(Ty);

  // Build up a list of operands to add together to form the full base.
  SmallVector<const SCEV *, 8> Ops;

  // Expand the BaseRegs portion.
  for (SmallVectorImpl<const SCEV *>::const_iterator I = F.BaseRegs.begin(),
       E = F.BaseRegs.end(); I != E; ++I) {
    const SCEV *Reg = *I;
    assert(!Reg->isZero() && "Zero allocated in a base register!");

    // If we're expanding for a post-inc user, make the post-inc adjustment.
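    // E.g. a normalized addrec like {0,+,1}<%L> is denormalized here to
    // {1,+,1}<%L> when this fixup uses the post-incremented value.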
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    Reg = TransformForPostIncUse(Denormalize, Reg,
                                 LF.UserInst, LF.OperandValToReplace,
                                 Loops, SE, DT);

    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, 0, IP)));
  }

  // Flush the operand list to suppress SCEVExpander hoisting.
  if (!Ops.empty()) {
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the ScaledReg portion.
  Value *ICmpScaledV = 0;
  if (F.AM.Scale != 0) {
    const SCEV *ScaledS = F.ScaledReg;

    // If we're expanding for a post-inc user, make the post-inc adjustment.
    PostIncLoopSet &Loops = const_cast<PostIncLoopSet &>(LF.PostIncLoops);
    ScaledS = TransformForPostIncUse(Denormalize, ScaledS,
                                     LF.UserInst, LF.OperandValToReplace,
                                     Loops, SE, DT);

    if (LU.Kind == LSRUse::ICmpZero) {
      // An interesting way of "folding" with an icmp is to use a negated
      // scale, which we'll implement by inserting it into the other operand
      // of the icmp.
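      // E.g. the formula for "(%x + -1*%y) == 0" is emitted as "%x == %y",
      // with %y expanded below and installed as the icmp's other operand.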
      assert(F.AM.Scale == -1 &&
             "The only scale supported by ICmpZero uses is -1!");
      ICmpScaledV = Rewriter.expandCodeFor(ScaledS, 0, IP);
    } else {
      // Otherwise just expand the scaled register and an explicit scale,
      // which is expected to be matched as part of the address.
      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, 0, IP));
      ScaledS = SE.getMulExpr(ScaledS,
                              SE.getConstant(ScaledS->getType(), F.AM.Scale));
      Ops.push_back(ScaledS);

      // Flush the operand list to suppress SCEVExpander hoisting.
      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
      Ops.clear();
      Ops.push_back(SE.getUnknown(FullV));
    }
  }

  // Expand the GV portion.
  if (F.AM.BaseGV) {
    Ops.push_back(SE.getUnknown(F.AM.BaseGV));

    // Flush the operand list to suppress SCEVExpander hoisting.
    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
    Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
  }

  // Expand the immediate portion.
  int64_t Offset = (uint64_t)F.AM.BaseOffs + LF.Offset;
  if (Offset != 0) {
    if (LU.Kind == LSRUse::ICmpZero) {
      // The other interesting way of "folding" with an ICmpZero is to use a
      // negated immediate.
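      // E.g. "(%x + 4) == 0" is emitted as "%x == -4" rather than
      // materializing the addition.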
      if (!ICmpScaledV)
        ICmpScaledV = ConstantInt::get(IntTy, -(uint64_t)Offset);
      else {
        Ops.push_back(SE.getUnknown(ICmpScaledV));
        ICmpScaledV = ConstantInt::get(IntTy, Offset);
      }
    } else {
      // Just add the immediate values. These again are expected to be matched
      // as part of the address.
      Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy, Offset)));
    }
  }

  // Expand the unfolded offset portion.
  int64_t UnfoldedOffset = F.UnfoldedOffset;
  if (UnfoldedOffset != 0) {
    // Just add the immediate values.
    Ops.push_back(SE.getUnknown(ConstantInt::getSigned(IntTy,
                                                       UnfoldedOffset)));
  }

  // Emit instructions summing all the operands.
  const SCEV *FullS = Ops.empty() ?
                      SE.getConstant(IntTy, 0) :
                      SE.getAddExpr(Ops);
  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);

  // We're done expanding now, so reset the rewriter.
  Rewriter.clearPostInc();

  // An ICmpZero Formula represents an ICmp which we're handling as a
  // comparison against zero. Now that we've expanded an expression for that
  // form, update the ICmp's other operand.
  if (LU.Kind == LSRUse::ICmpZero) {
    ICmpInst *CI = cast<ICmpInst>(LF.UserInst);
    DeadInsts.push_back(CI->getOperand(1));
    assert(!F.AM.BaseGV && "ICmp does not support folding a global value and "
                           "a scale at the same time!");
    if (F.AM.Scale == -1) {
      if (ICmpScaledV->getType() != OpTy) {
        Instruction *Cast =
          CastInst::Create(CastInst::getCastOpcode(ICmpScaledV, false,
                                                   OpTy, false),
                           ICmpScaledV, OpTy, "tmp", CI);
        ICmpScaledV = Cast;
      }
      CI->setOperand(1, ICmpScaledV);
    } else {
      assert(F.AM.Scale == 0 &&
             "ICmp does not support folding a global value and "
             "a scale at the same time!");
      Constant *C = ConstantInt::getSigned(SE.getEffectiveSCEVType(OpTy),
                                           -(uint64_t)Offset);
      if (C->getType() != OpTy)
        C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
                                                          OpTy, false),
                                  C, OpTy);

      CI->setOperand(1, C);
    }
  }

  return FullV;
}

/// RewriteForPHI - Helper for Rewrite. PHI nodes are special because the use
/// of their operands effectively happens in their predecessor blocks, so the
/// expression may need to be expanded in multiple places.
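/// For example, given
///   %x = phi [ %v, %pred1 ], [ %v, %pred2 ]
/// the replacement for %v may need to be expanded once in %pred1 and once
/// in %pred2; the Inserted map below shares a single expansion between
/// predecessors when possible.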
void LSRInstance::RewriteForPHI(PHINode *PN,
                                const LSRFixup &LF,
                                const Formula &F,
                                SCEVExpander &Rewriter,
                                SmallVectorImpl<WeakVH> &DeadInsts,
                                Pass *P) const {
  DenseMap<BasicBlock *, Value *> Inserted;
  for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
    if (PN->getIncomingValue(i) == LF.OperandValToReplace) {
      BasicBlock *BB = PN->getIncomingBlock(i);

      // If this is a critical edge, split the edge so that we do not insert
      // the code on all predecessor/successor paths. We do this unless this
      // is the canonical backedge for this loop, which complicates post-inc
      // users.
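      // E.g. if BB branches both to this PHI's block and elsewhere,
      // splitting the edge gives the expanded code a block of its own,
      // so it doesn't execute along the unrelated path.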
      if (e != 1 && BB->getTerminator()->getNumSuccessors() > 1 &&
          !isa<IndirectBrInst>(BB->getTerminator())) {
        BasicBlock *Parent = PN->getParent();
        Loop *PNLoop = LI.getLoopFor(Parent);
        if (!PNLoop || Parent != PNLoop->getHeader()) {
          // Split the critical edge.
          BasicBlock *NewBB = 0;
          if (!Parent->isLandingPad()) {
            NewBB = SplitCriticalEdge(BB, Parent, P,
                                      /*MergeIdenticalEdges=*/true,
                                      /*DontDeleteUselessPhis=*/true);
          } else {
            SmallVector<BasicBlock*, 2> NewBBs;
            SplitLandingPadPredecessors(Parent, BB, "", "", P, NewBBs);
            NewBB = NewBBs[0];
          }

          // If PN is outside of the loop and BB is in the loop, we want to
          // move the block to be immediately before the PHI block, not
          // immediately after BB.
          if (L->contains(BB) && !L->contains(PN))
            NewBB->moveBefore(PN->getParent());

          // Splitting the edge can reduce the number of PHI entries we have.
          e = PN->getNumIncomingValues();
          BB = NewBB;
          i = PN->getBasicBlockIndex(BB);
        }
      }

      std::pair<DenseMap<BasicBlock *, Value *>::iterator, bool> Pair =
        Inserted.insert(std::make_pair(BB, static_cast<Value *>(0)));
      if (!Pair.second)
        PN->setIncomingValue(i, Pair.first->second);
      else {
        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);

        // If this is reuse-by-noop-cast, insert the noop cast.
        Type *OpTy = LF.OperandValToReplace->getType();
        if (FullV->getType() != OpTy)
          FullV =
            CastInst::Create(CastInst::getCastOpcode(FullV, false,
                                                     OpTy, false),
                             FullV, LF.OperandValToReplace->getType(),
                             "tmp", BB->getTerminator());

        PN->setIncomingValue(i, FullV);
        Pair.first->second = FullV;
      }
    }
}

/// Rewrite - Emit instructions for the leading candidate expression for this
/// LSRUse (this is called "expanding"), and update the UserInst to reference
/// the newly expanded value.
void LSRInstance::Rewrite(const LSRFixup &LF,
                          const Formula &F,
                          SCEVExpander &Rewriter,
                          SmallVectorImpl<WeakVH> &DeadInsts,
                          Pass *P) const {
  // First, find an insertion point that dominates UserInst. For PHI nodes,
  // find the nearest block which dominates all the relevant uses.
  if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
    RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
  } else {
    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);

    // If this is reuse-by-noop-cast, insert the noop cast.
    Type *OpTy = LF.OperandValToReplace->getType();
    if (FullV->getType() != OpTy) {
      Instruction *Cast =
        CastInst::Create(CastInst::getCastOpcode(FullV, false, OpTy, false),
                         FullV, OpTy, "tmp", LF.UserInst);
      FullV = Cast;
    }

    // Update the user. ICmpZero is handled specially here (for now) because
    // Expand may have updated one of the operands of the icmp already, and
    // its new value may happen to be equal to LF.OperandValToReplace, in
    // which case doing replaceUsesOfWith leads to replacing both operands
    // with the same value. TODO: Reorganize this.
    if (Uses[LF.LUIdx].Kind == LSRUse::ICmpZero)
      LF.UserInst->setOperand(0, FullV);
    else
      LF.UserInst->replaceUsesOfWith(LF.OperandValToReplace, FullV);
  }

  DeadInsts.push_back(LF.OperandValToReplace);
}

/// ImplementSolution - Rewrite all the fixup locations with new values,
/// following the chosen solution.
void
LSRInstance::ImplementSolution(const SmallVectorImpl<const Formula *> &Solution,
                               Pass *P) {
  // Keep track of instructions we may have made dead, so that
  // we can remove them after we are done working.
  SmallVector<WeakVH, 16> DeadInsts;

  SCEVExpander Rewriter(SE, "lsr");
  Rewriter.disableCanonicalMode();
  Rewriter.enableLSRMode();
  Rewriter.setIVIncInsertPos(L, IVIncInsertPos);

  // Expand the new value definitions and update the users.
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    const LSRFixup &Fixup = *I;

    Rewrite(Fixup, *Solution[Fixup.LUIdx], Rewriter, DeadInsts, P);

    Changed = true;
  }

  // Clean up after ourselves. This must be done before deleting any
  // instructions.
  Rewriter.clear();

  Changed |= DeleteTriviallyDeadInstructions(DeadInsts);
}

LSRInstance::LSRInstance(const TargetLowering *tli, Loop *l, Pass *P)
  : IU(P->getAnalysis<IVUsers>()),
    SE(P->getAnalysis<ScalarEvolution>()),
    DT(P->getAnalysis<DominatorTree>()),
    LI(P->getAnalysis<LoopInfo>()),
    TLI(tli), L(l), Changed(false), IVIncInsertPos(0) {

  // If LoopSimplify form is not available, stay out of trouble.
  if (!L->isLoopSimplifyForm()) return;

  // If there's no interesting work to be done, bail early.
  if (IU.empty()) return;

  DEBUG(dbgs() << "\nLSR on loop ";
        WriteAsOperand(dbgs(), L->getHeader(), /*PrintType=*/false);
        dbgs() << ":\n");

  // First, perform some low-level loop optimizations.
  OptimizeShadowIV();
  OptimizeLoopTermCond();

  // If loop preparation eliminates all interesting IV users, bail.
  if (IU.empty()) return;

  // Skip nested loops until we can model them better with formulae.
  if (!EnableNested && !L->empty()) {
    if (EnablePhiElim) {
      // Remove any extra phis created by processing inner loops.
      SmallVector<WeakVH, 16> DeadInsts;
      SCEVExpander Rewriter(SE, "lsr");
      Changed |= (bool)Rewriter.replaceCongruentIVs(L, &DT, DeadInsts);
      Changed |= (bool)DeleteTriviallyDeadInstructions(DeadInsts);
    }
    DEBUG(dbgs() << "LSR skipping outer loop " << *L << "\n");
    return;
  }

  // Start collecting data and preparing for the solver.
  CollectInterestingTypesAndFactors();
  CollectFixupsAndInitialFormulae();
  CollectLoopInvariantFixupsAndFormulae();

  DEBUG(dbgs() << "LSR found " << Uses.size() << " uses:\n";
        print_uses(dbgs()));

  // Now use the reuse data to generate a bunch of interesting ways
  // to formulate the values needed for the uses.
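  // E.g. a use of {4,+,4}<%L> might be given formulae along the lines of
  // reg({4,+,4}<%L>), 4 + reg({0,+,4}<%L>), and 4 + 4*reg({0,+,1}<%L>),
  // letting the solver pick whichever shares registers best.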
  GenerateAllReuseFormulae();

  FilterOutUndesirableDedicatedRegisters();
  NarrowSearchSpaceUsingHeuristics();

  SmallVector<const Formula *, 8> Solution;
  Solve(Solution);

  // Release memory that is no longer needed.
  Factors.clear();
  Types.clear();
  RegUses.clear();

  if (Solution.empty())
    return;

#ifndef NDEBUG
  // Formulae should be legal.
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J)
      assert(isLegalUse(J->AM, LU.MinOffset, LU.MaxOffset,
                        LU.Kind, LU.AccessTy, TLI) &&
             "Illegal formula generated!");
  }
#endif

  // Now that we've decided what we want, make it so.
  ImplementSolution(Solution, P);

  if (EnablePhiElim) {
    // Remove any extra phis created by processing inner loops.
    SmallVector<WeakVH, 16> DeadInsts;
    SCEVExpander Rewriter(SE, "lsr");
    Changed |= (bool)Rewriter.replaceCongruentIVs(L, &DT, DeadInsts);
    Changed |= (bool)DeleteTriviallyDeadInstructions(DeadInsts);
  }
}

void LSRInstance::print_factors_and_types(raw_ostream &OS) const {
  if (Factors.empty() && Types.empty()) return;

  OS << "LSR has identified the following interesting factors and types: ";
  bool First = true;

  for (SmallSetVector<int64_t, 8>::const_iterator
       I = Factors.begin(), E = Factors.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '*' << *I;
  }

  for (SmallSetVector<Type *, 4>::const_iterator
       I = Types.begin(), E = Types.end(); I != E; ++I) {
    if (!First) OS << ", ";
    First = false;
    OS << '(' << **I << ')';
  }
  OS << '\n';
}

void LSRInstance::print_fixups(raw_ostream &OS) const {
  OS << "LSR is examining the following fixup sites:\n";
  for (SmallVectorImpl<LSRFixup>::const_iterator I = Fixups.begin(),
       E = Fixups.end(); I != E; ++I) {
    OS << "  ";
    I->print(OS);
    OS << '\n';
  }
}

void LSRInstance::print_uses(raw_ostream &OS) const {
  OS << "LSR is examining the following uses:\n";
  for (SmallVectorImpl<LSRUse>::const_iterator I = Uses.begin(),
       E = Uses.end(); I != E; ++I) {
    const LSRUse &LU = *I;
    OS << "  ";
    LU.print(OS);
    OS << '\n';
    for (SmallVectorImpl<Formula>::const_iterator J = LU.Formulae.begin(),
         JE = LU.Formulae.end(); J != JE; ++J) {
      OS << "    ";
      J->print(OS);
      OS << '\n';
    }
  }
}

void LSRInstance::print(raw_ostream &OS) const {
  print_factors_and_types(OS);
  print_fixups(OS);
  print_uses(OS);
}

void LSRInstance::dump() const {
  print(errs()); errs() << '\n';
}

namespace {

class LoopStrengthReduce : public LoopPass {
  /// TLI - Keep a pointer to a TargetLowering to consult for determining
  /// transformation profitability.
  const TargetLowering *const TLI;

public:
  static char ID; // Pass ID, replacement for typeid
  explicit LoopStrengthReduce(const TargetLowering *tli = 0);

private:
  bool runOnLoop(Loop *L, LPPassManager &LPM);
  void getAnalysisUsage(AnalysisUsage &AU) const;
};

}

char LoopStrengthReduce::ID = 0;
INITIALIZE_PASS_BEGIN(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolution)
INITIALIZE_PASS_DEPENDENCY(IVUsers)
INITIALIZE_PASS_DEPENDENCY(LoopInfo)
INITIALIZE_PASS_DEPENDENCY(LoopSimplify)
INITIALIZE_PASS_END(LoopStrengthReduce, "loop-reduce",
                "Loop Strength Reduction", false, false)
Pass *llvm::createLoopStrengthReducePass(const TargetLowering *TLI) {
  return new LoopStrengthReduce(TLI);
}

LoopStrengthReduce::LoopStrengthReduce(const TargetLowering *tli)
  : LoopPass(ID), TLI(tli) {
  initializeLoopStrengthReducePass(*PassRegistry::getPassRegistry());
}

void LoopStrengthReduce::getAnalysisUsage(AnalysisUsage &AU) const {
  // We split critical edges, so we change the CFG. However, we do update
  // many analyses if they are around.
  AU.addPreservedID(LoopSimplifyID);

  AU.addRequired<LoopInfo>();
  AU.addPreserved<LoopInfo>();
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<DominatorTree>();
  AU.addPreserved<DominatorTree>();
  AU.addRequired<ScalarEvolution>();
  AU.addPreserved<ScalarEvolution>();
  // Requiring LoopSimplify a second time here prevents IVUsers from running
  // twice, since LoopSimplify was invalidated by running ScalarEvolution.
  AU.addRequiredID(LoopSimplifyID);
  AU.addRequired<IVUsers>();
  AU.addPreserved<IVUsers>();
}

bool LoopStrengthReduce::runOnLoop(Loop *L, LPPassManager & /*LPM*/) {
  bool Changed = false;

  // Run the main LSR transformation.
  Changed |= LSRInstance(TLI, L, this).getChanged();

  // At this point, it is worth checking to see if any recurrence PHIs are also
  // dead, so that we can remove them as well.
  Changed |= DeleteDeadPHIs(L->getHeader());

  return Changed;
}