1 //===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Nate Begeman and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass performs a strength reduction on array references inside loops that
11 // have as one or more of their components the loop induction variable. This is
12 // accomplished by creating a new Value to hold the initial value of the array
13 // access for the first iteration, and then creating a new GEP instruction in
14 // the loop to increment the value by the appropriate amount.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "loop-reduce"
19 #include "llvm/Transforms/Scalar.h"
20 #include "llvm/Constants.h"
21 #include "llvm/Instructions.h"
22 #include "llvm/Type.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/Analysis/Dominators.h"
25 #include "llvm/Analysis/LoopInfo.h"
26 #include "llvm/Analysis/ScalarEvolutionExpander.h"
27 #include "llvm/Support/CFG.h"
28 #include "llvm/Support/GetElementPtrTypeIterator.h"
29 #include "llvm/Transforms/Utils/Local.h"
30 #include "llvm/Target/TargetData.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/Support/Debug.h"
// Pass-wide statistic: counts GEP instructions this pass strength-reduced.
38 Statistic<> NumReduced ("loop-reduce", "Number of GEPs strength reduced");
// GEPCache - A per-Value map node holding a previously emitted PHI so an
// equivalent base need not be re-materialized.
// NOTE(review): the 'struct GEPCache {' header line is not visible in this
// chunk of the listing.
42 GEPCache() : CachedPHINode(0), Map() {}
// get - Return the cache node for 'v', default-constructing an empty node the
// first time 'v' is seen.
44 GEPCache *get(Value *v) {
45 std::map<Value *, GEPCache>::iterator I = Map.find(v);
47 I = Map.insert(std::pair<Value *, GEPCache>(v, GEPCache())).first;
// CachedPHINode - PHI node previously emitted for this chain; 0 if none yet.
51 PHINode *CachedPHINode;
52 std::map<Value *, GEPCache> Map;
55 /// IVStrideUse - Keep track of one use of a strided induction variable, where
56 /// the stride is stored externally. The Offset member keeps track of the
57 /// offset from the IV, User is the actual user of the operand, and 'Operand'
58 /// is the operand # of the User that is the use.
// NOTE(review): the 'struct IVStrideUse {' header and the Offset/User member
// declarations are on lines not visible in this chunk.
62 Value *OperandValToReplace;
// Construct a use record from the offset SCEV, the using instruction, and the
// operand value of that instruction that will later be rewritten.
64 IVStrideUse(const SCEVHandle &Offs, Instruction *U, Value *O)
65 : Offset(Offs), User(U), OperandValToReplace(O) {}
68 /// IVUsersOfOneStride - This structure keeps track of all instructions that
69 /// have an operand that is based on the trip count multiplied by some stride.
70 /// The stride for all of these users is common and kept external to this
72 struct IVUsersOfOneStride {
73 /// Users - Keep track of all of the users of this stride as well as the
74 /// initial value and the operand that uses the IV.
75 std::vector<IVStrideUse> Users;
// addUser - Append one more user of this stride: its offset from the IV, the
// using instruction, and the operand value to replace during rewriting.
77 void addUser(const SCEVHandle &Offset,Instruction *User, Value *Operand) {
78 Users.push_back(IVStrideUse(Offset, User, Operand));
// LoopStrengthReduce - FunctionPass that strength-reduces GEPs that use loop
// induction variables, replacing per-iteration address computation with an
// incremented PHI per (stride, base) pair.
83 class LoopStrengthReduce : public FunctionPass {
// NOTE(review): the LI/DS/SE/TD analysis-pointer members are declared on
// lines not visible in this chunk (they are assigned in runOnFunction below).
// UIntPtrTy - The target's pointer-sized unsigned integer type; all index
// arithmetic is canonicalized to this type.
88 const Type *UIntPtrTy;
91 /// MaxTargetAMSize - This is the maximum power-of-two scale value that the
92 /// target can handle for free with its addressing modes.
93 unsigned MaxTargetAMSize;
95 /// IVUsesByStride - Keep track of all uses of induction variables that we
96 /// are interested in. The key of the map is the stride of the access.
97 std::map<Value*, IVUsersOfOneStride> IVUsesByStride;
99 /// CastedValues - As we need to cast values to uintptr_t, this keeps track
100 /// of the casted version of each value. This is accessed by
101 /// getCastedVersionOf.
102 std::map<Value*, Value*> CastedPointers;
104 /// DeadInsts - Keep track of instructions we may have made dead, so that
105 /// we can remove them after we are done working.
106 std::set<Instruction*> DeadInsts;
// Construct the pass; MTAMS defaults to 1 (no free scaling assumed).
108 LoopStrengthReduce(unsigned MTAMS = 1)
109 : MaxTargetAMSize(MTAMS) {
// runOnFunction - Cache the required analyses, then reduce each top-level
// loop (runOnLoop recurses into nested loops first).
112 virtual bool runOnFunction(Function &) {
113 LI = &getAnalysis<LoopInfo>();
114 DS = &getAnalysis<DominatorSet>();
115 SE = &getAnalysis<ScalarEvolution>();
116 TD = &getAnalysis<TargetData>();
117 UIntPtrTy = TD->getIntPtrType();
120 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
// Drop the per-function cast cache so stale Value*s are not reused.
123 CastedPointers.clear();
// getAnalysisUsage - This pass only rewrites instructions, so the CFG is
// preserved; it requires canonicalized (simplified) loops.
127 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
128 AU.setPreservesCFG();
129 AU.addRequiredID(LoopSimplifyID);
130 AU.addRequired<LoopInfo>();
131 AU.addRequired<DominatorSet>();
132 AU.addRequired<TargetData>();
133 AU.addRequired<ScalarEvolution>();
136 /// getCastedVersionOf - Return the specified value casted to uintptr_t.
138 Value *getCastedVersionOf(Value *V);
// Per-loop driver and analysis/transform helpers (defined below).
140 void runOnLoop(Loop *L);
141 bool AddUsersIfInteresting(Instruction *I, Loop *L);
142 void AnalyzeGetElementPtrUsers(GetElementPtrInst *GEP, Instruction *I,
145 void StrengthReduceStridedIVUsers(Value *Stride, IVUsersOfOneStride &Uses,
146 Loop *L, bool isOnlyStride);
148 void strengthReduceGEP(GetElementPtrInst *GEPI, Loop *L,
150 Instruction *InsertBefore,
151 std::set<Instruction*> &DeadInsts);
152 void DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts);
// Register the pass under the "loop-reduce" command-line name.
154 RegisterOpt<LoopStrengthReduce> X("loop-reduce",
155 "Strength Reduce GEP Uses of Ind. Vars");
// Public factory: create the pass with the target's maximum free
// addressing-mode scale.
158 FunctionPass *llvm::createLoopStrengthReducePass(unsigned MaxTargetAMSize) {
159 return new LoopStrengthReduce(MaxTargetAMSize);
162 /// getCastedVersionOf - Return the specified value casted to uintptr_t.
164 Value *LoopStrengthReduce::getCastedVersionOf(Value *V) {
// Already the right type: no cast needed.
165 if (V->getType() == UIntPtrTy) return V;
// Constants fold into a constant-expression cast; no instruction emitted.
166 if (Constant *CB = dyn_cast<Constant>(V))
167 return ConstantExpr::getCast(CB, UIntPtrTy);
// Memoize per-Value so each value is cast at most once per function.
// NOTE(review): the early-return on a cache hit is on a line not visible in
// this chunk.
169 Value *&New = CastedPointers[V];
// Pick an insertion point that dominates all uses of the cast.
172 BasicBlock::iterator InsertPt;
173 if (Argument *Arg = dyn_cast<Argument>(V)) {
174 // Insert into the entry of the function, after any allocas.
175 InsertPt = Arg->getParent()->begin()->begin();
176 while (isa<AllocaInst>(InsertPt)) ++InsertPt;
// An invoke's result is only available on its normal-destination edge.
178 if (InvokeInst *II = dyn_cast<InvokeInst>(V)) {
179 InsertPt = II->getNormalDest()->begin();
// Otherwise insert immediately after the defining instruction.
181 InsertPt = cast<Instruction>(V);
185 // Do not insert casts into the middle of PHI node blocks.
186 while (isa<PHINode>(InsertPt)) ++InsertPt;
189 return New = new CastInst(V, UIntPtrTy, V->getName(), InsertPt);
193 /// DeleteTriviallyDeadInstructions - If any of the instructions is the
194 /// specified set are trivially dead, delete them and see if this makes any of
195 /// their operands subsequently dead.
196 void LoopStrengthReduce::
197 DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts) {
// Worklist loop: deleting one instruction may make its operands dead, so
// operands are pushed back into the set for reconsideration.
198 while (!Insts.empty()) {
199 Instruction *I = *Insts.begin();
200 Insts.erase(Insts.begin());
201 if (isInstructionTriviallyDead(I)) {
202 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
203 if (Instruction *U = dyn_cast<Instruction>(I->getOperand(i)))
// Keep ScalarEvolution consistent before physically erasing I.
205 SE->deleteInstructionFromRecords(I);
206 I->eraseFromParent();
213 /// CanReduceSCEV - Return true if we can strength reduce this scalar evolution
214 /// in the specified loop.
215 static bool CanReduceSCEV(const SCEVHandle &SH, Loop *L) {
// Only addrec expressions rooted at this exact loop are candidates.
216 SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SH);
217 if (!AddRec || AddRec->getLoop() != L) return false;
219 // FIXME: Generalize to non-affine IV's.
220 if (!AddRec->isAffine()) return false;
222 // FIXME: generalize to IV's with more complex strides (must emit stride
223 // expression outside of loop!)
// Constant stride is always reducible.
// NOTE(review): the 'return true;' for this case is on a line not visible in
// this chunk.
224 if (isa<SCEVConstant>(AddRec->getOperand(1)))
227 // We handle steps by unsigned values, because we know we won't have to insert
// A loop-invariant unsigned step value is also acceptable.
229 if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(AddRec->getOperand(1)))
230 if (SU->getValue()->getType()->isUnsigned())
233 // Otherwise, no, we can't handle it yet.
238 /// GetAdjustedIndex - Adjust the specified GEP sequential type index to match
239 /// the size of the pointer type, and scale it by the type size.
240 static SCEVHandle GetAdjustedIndex(const SCEVHandle &Idx, uint64_t TySize,
241 const Type *UIntPtrTy) {
242 SCEVHandle Result = Idx;
// Normalize the index width to the pointer width: truncate wide indices,
// zero-extend narrow ones. Signedness is ignored via getUnsignedVersion().
243 if (Result->getType()->getUnsignedVersion() != UIntPtrTy) {
244 if (UIntPtrTy->getPrimitiveSize() < Result->getType()->getPrimitiveSize())
245 Result = SCEVTruncateExpr::get(Result, UIntPtrTy);
247 Result = SCEVZeroExtendExpr::get(Result, UIntPtrTy);
250 // This index is scaled by the type size being indexed.
// NOTE(review): the TySize multiplicand and the function's return are on
// lines not visible in this chunk.
252 Result = SCEVMulExpr::get(Result,
253 SCEVConstant::get(ConstantUInt::get(UIntPtrTy,
259 /// AnalyzeGetElementPtrUsers - Analyze all of the users of the specified
260 /// getelementptr instruction, adding them to the IVUsesByStride table. Note
261 /// that we only want to analyze a getelementptr instruction once, and it can
262 /// have multiple operands that are uses of the indvar (e.g. A[i][i]). Because
263 /// of this, we only process a GEP instruction if its first recurrent operand is
264 /// "op", otherwise we will either have already processed it or we will sometime
266 void LoopStrengthReduce::AnalyzeGetElementPtrUsers(GetElementPtrInst *GEP,
267 Instruction *Op, Loop *L) {
268 // Analyze all of the subscripts of this getelementptr instruction, looking
269 // for uses that are determined by the trip count of L. First, skip all
270 // operands the are not dependent on the IV.
272 // Build up the base expression. Insert an LLVM cast of the pointer to
274 SCEVHandle Base = SCEVUnknown::get(getCastedVersionOf(GEP->getOperand(0)));
276 gep_type_iterator GTI = gep_type_begin(GEP);
// Walk operands before 'Op', folding each loop-invariant index into Base.
// NOTE(review): the declaration of loop counter 'i' is on a line not visible
// in this chunk.
278 for (; GEP->getOperand(i) != Op; ++i, ++GTI) {
279 // If this is a use of a recurrence that we can analyze, and it comes before
280 // Op does in the GEP operand list, we will handle this when we process this
// Struct field: add the constant member offset from the target layout.
282 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
283 const StructLayout *SL = TD->getStructLayout(STy);
284 unsigned Idx = cast<ConstantUInt>(GEP->getOperand(i))->getValue();
285 uint64_t Offset = SL->MemberOffsets[Idx];
286 Base = SCEVAddExpr::get(Base, SCEVUnknown::getIntegerSCEV(Offset,
289 SCEVHandle Idx = SE->getSCEV(GEP->getOperand(i));
291 // If this operand is reducible, and it's not the one we are looking at
292 // currently, do not process the GEP at this time.
293 if (CanReduceSCEV(Idx, L))
// Loop-invariant sequential index: scale by element size and fold into Base.
295 Base = SCEVAddExpr::get(Base, GetAdjustedIndex(Idx,
296 TD->getTypeSize(GTI.getIndexedType()), UIntPtrTy));
300 // Get the index, convert it to intptr_t.
301 SCEVHandle GEPIndexExpr =
302 GetAdjustedIndex(SE->getSCEV(Op), TD->getTypeSize(GTI.getIndexedType()),
305 // Process all remaining subscripts in the GEP instruction.
306 for (++i, ++GTI; i != GEP->getNumOperands(); ++i, ++GTI)
307 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
308 const StructLayout *SL = TD->getStructLayout(STy);
309 unsigned Idx = cast<ConstantUInt>(GEP->getOperand(i))->getValue();
310 uint64_t Offset = SL->MemberOffsets[Idx];
311 Base = SCEVAddExpr::get(Base, SCEVUnknown::getIntegerSCEV(Offset,
314 SCEVHandle Idx = SE->getSCEV(GEP->getOperand(i));
// A second IV subscript (e.g. A[i][i]) folds into the same addrec.
315 if (CanReduceSCEV(Idx, L)) { // Another IV subscript
316 GEPIndexExpr = SCEVAddExpr::get(GEPIndexExpr,
317 GetAdjustedIndex(Idx, TD->getTypeSize(GTI.getIndexedType()),
319 assert(CanReduceSCEV(GEPIndexExpr, L) &&
320 "Cannot reduce the sum of two reducible SCEV's??");
// Non-IV trailing subscript: fold into the invariant Base instead.
322 Base = SCEVAddExpr::get(Base, GetAdjustedIndex(Idx,
323 TD->getTypeSize(GTI.getIndexedType()), UIntPtrTy));
327 assert(CanReduceSCEV(GEPIndexExpr, L) && "Non reducible idx??");
329 // FIXME: If the base is not loop invariant, we currently cannot emit this.
330 if (!Base->isLoopInvariant(L)) {
331 DEBUG(std::cerr << "IGNORING GEP due to non-invariant base: "
// Split the addrec: its start joins the invariant Base, its step is Stride.
336 Base = SCEVAddExpr::get(Base, cast<SCEVAddRecExpr>(GEPIndexExpr)->getStart());
337 SCEVHandle Stride = cast<SCEVAddRecExpr>(GEPIndexExpr)->getOperand(1);
339 DEBUG(std::cerr << "GEP BASE : " << *Base << "\n");
340 DEBUG(std::cerr << "GEP STRIDE: " << *Stride << "\n");
// Canonicalize the step to an unsigned Value* so IVUsesByStride keys match.
342 Value *Step = 0; // Step of ISE.
343 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(Stride))
344 /// Always get the step value as an unsigned value.
345 Step = ConstantExpr::getCast(SC->getValue(),
346 SC->getValue()->getType()->getUnsignedVersion());
348 Step = cast<SCEVUnknown>(Stride)->getValue();
349 assert(Step->getType()->isUnsigned() && "Bad step value!");
352 // Now that we know the base and stride contributed by the GEP instruction,
353 // process all users.
354 for (Value::use_iterator UI = GEP->use_begin(), E = GEP->use_end();
356 Instruction *User = cast<Instruction>(*UI);
358 // Do not infinitely recurse on PHI nodes.
359 if (isa<PHINode>(User) && User->getParent() == L->getHeader())
362 // If this is an instruction defined in a nested loop, or outside this loop,
363 // don't mess with it.
364 if (LI->getLoopFor(User->getParent()) != L)
367 DEBUG(std::cerr << "FOUND USER: " << *User
368 << " OF STRIDE: " << *Step << " BASE = " << *Base << "\n");
370 // Okay, we found a user that we cannot reduce. Analyze the instruction
371 // and decide what to do with it.
372 IVUsesByStride[Step].addUser(Base, User, GEP);
376 /// AddUsersIfInteresting - Inspect the specified instruction. If it is a
377 /// reducible SCEV, recursively add its users to the IVUsesByStride set and
378 /// return true. Otherwise, return false.
379 bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L) {
// Void-typed instructions produce no value to reduce.
380 if (I->getType() == Type::VoidTy) return false;
381 SCEVHandle ISE = SE->getSCEV(I);
382 if (!CanReduceSCEV(ISE, L)) return false;
384 SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(ISE);
385 SCEVHandle Start = AR->getStart();
387 // Get the step value, canonicalizing to an unsigned integer type so that
388 // lookups in the map will match.
389 Value *Step = 0; // Step of ISE.
390 if (SCEVConstant *SC = dyn_cast<SCEVConstant>(AR->getOperand(1)))
391 /// Always get the step value as an unsigned value.
392 Step = ConstantExpr::getCast(SC->getValue(),
393 SC->getValue()->getType()->getUnsignedVersion());
395 Step = cast<SCEVUnknown>(AR->getOperand(1))->getValue();
396 assert(Step->getType()->isUnsigned() && "Bad step value!");
// Track GEPs analyzed from this instruction so A[i][i]-style multi-operand
// uses are only processed once.
398 std::set<GetElementPtrInst*> AnalyzedGEPs;
400 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;++UI){
401 Instruction *User = cast<Instruction>(*UI);
403 // Do not infinitely recurse on PHI nodes.
404 if (isa<PHINode>(User) && User->getParent() == L->getHeader())
407 // If this is an instruction defined in a nested loop, or outside this loop,
408 // don't recurse into it.
409 if (LI->getLoopFor(User->getParent()) != L) {
410 DEBUG(std::cerr << "FOUND USER in nested loop: " << *User
411 << " OF SCEV: " << *ISE << "\n");
413 // Okay, we found a user that we cannot reduce. Analyze the instruction
414 // and decide what to do with it.
415 IVUsesByStride[Step].addUser(Start, User, I);
419 // Next, see if this user is analyzable itself!
420 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(User)) {
421 // If this is a getelementptr instruction, figure out what linear
422 // expression of induction variable is actually being used.
423 if (AnalyzedGEPs.insert(GEP).second) // Not already analyzed?
424 AnalyzeGetElementPtrUsers(GEP, I, L);
425 } else if (!AddUsersIfInteresting(User, L)) {
426 DEBUG(std::cerr << "FOUND USER: " << *User
427 << " OF SCEV: " << *ISE << "\n");
429 // Okay, we found a user that we cannot reduce. Analyze the instruction
430 // and decide what to do with it.
431 IVUsesByStride[Step].addUser(Start, User, I);
438 /// BasedUser - For a particular base value, keep information about how we've
439 /// partitioned the expression so far.
// NOTE(review): the 'struct BasedUser {' header and the Inst/Imm member
// declarations are on lines not visible in this chunk.
441 /// Inst - The instruction using the induction variable.
444 /// OperandValToReplace - The operand value of Inst to replace with the
446 Value *OperandValToReplace;
448 /// Imm - The immediate value that should be added to the base immediately
449 /// before Inst, because it will be folded into the imm field of the
453 /// EmittedBase - The actual value* to use for the base value of this
454 /// operation. This is null if we should just use zero so far.
457 BasedUser(Instruction *I, Value *Op, const SCEVHandle &IMM)
458 : Inst(I), OperandValToReplace(Op), Imm(IMM), EmittedBase(0) {}
461 // No need to compare these.
// Dummy ordering so BasedUser can live in a std::pair that gets sorted by
// its SCEV base (the first pair member) only.
462 bool operator<(const BasedUser &BU) const { return 0; }
// dump - Print this record's immediate, emitted base (if any), and user
// instruction to stderr for debugging.
468 void BasedUser::dump() const {
469 std::cerr << " Imm=" << *Imm;
471 std::cerr << " EB=" << *EmittedBase;
473 std::cerr << " Inst: " << *Inst;
476 /// isTargetConstant - Return true if the following can be referenced by the
477 /// immediate field of a target instruction.
478 static bool isTargetConstant(const SCEVHandle &V) {
480 // FIXME: Look at the target to decide if &GV is a legal constant immediate.
481 if (isa<SCEVConstant>(V)) return true;
// Globals-as-immediates are disabled: the code below is dead until a target
// hook exists to query legality.
483 return false; // ENABLE this for x86
// (Unreachable) Recognize a cast of a global value as a potential immediate.
485 if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
486 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
487 if (CE->getOpcode() == Instruction::Cast)
488 if (isa<GlobalValue>(CE->getOperand(0)))
489 // FIXME: should check to see that the dest is uintptr_t!
494 /// GetImmediateValues - Look at Val, and pull out any additions of constants
495 /// that can fit into the immediate field of instructions in the target.
496 static SCEVHandle GetImmediateValues(SCEVHandle Val, bool isAddress) {
// NOTE(review): the guard using 'isAddress' ahead of this return is on a
// line not visible in this chunk.
498 return SCEVUnknown::getIntegerSCEV(0, Val->getType());
// The whole value fits in an immediate field: take it all.
499 if (isTargetConstant(Val))
502 if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
// Scan the add's operands for the first immediate-legal term...
504 for (; i != SAE->getNumOperands(); ++i)
505 if (isTargetConstant(SAE->getOperand(i))) {
506 SCEVHandle ImmVal = SAE->getOperand(i);
508 // If there are any other immediates that we can handle here, pull them
// ...then accumulate every later immediate-legal term into ImmVal too.
510 for (++i; i != SAE->getNumOperands(); ++i)
511 if (isTargetConstant(SAE->getOperand(i)))
512 ImmVal = SCEVAddExpr::get(ImmVal, SAE->getOperand(i));
515 } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
516 // Try to pull immediates out of the start value of nested addrec's.
517 return GetImmediateValues(SARE->getStart(), isAddress);
// Nothing extractable: return a zero of the appropriate type.
520 return SCEVUnknown::getIntegerSCEV(0, Val->getType());
523 /// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
524 /// stride of IV. All of the users may have different starting values, and this
525 /// may not be the only stride (we know it is if isOnlyStride is true).
526 void LoopStrengthReduce::StrengthReduceStridedIVUsers(Value *Stride,
527 IVUsersOfOneStride &Uses,
530 // Transform our list of users and offsets to a bit more complex table. In
531 // this new vector, the first entry for each element is the base of the
532 // strided access, and the second is the BasedUser object for the use. We
533 // progressively move information from the first to the second entry, until we
534 // eventually emit the object.
535 std::vector<std::pair<SCEVHandle, BasedUser> > UsersToProcess;
536 UsersToProcess.reserve(Uses.Users.size());
538 SCEVHandle ZeroBase = SCEVUnknown::getIntegerSCEV(0,
539 Uses.Users[0].Offset->getType());
541 for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i)
542 UsersToProcess.push_back(std::make_pair(Uses.Users[i].Offset,
543 BasedUser(Uses.Users[i].User,
544 Uses.Users[i].OperandValToReplace,
547 // First pass, figure out what we can represent in the immediate fields of
548 // instructions. If we can represent anything there, move it to the imm
549 // fields of the BasedUsers.
550 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
// Only loads/stores can fold an offset into an addressing mode.
551 bool isAddress = isa<LoadInst>(UsersToProcess[i].second.Inst) ||
552 isa<StoreInst>(UsersToProcess[i].second.Inst);
553 UsersToProcess[i].second.Imm = GetImmediateValues(UsersToProcess[i].first,
// Keep base+Imm equal to the original offset by subtracting what we moved.
555 UsersToProcess[i].first = SCEV::getMinusSCEV(UsersToProcess[i].first,
556 UsersToProcess[i].second.Imm);
558 DEBUG(std::cerr << "BASE: " << *UsersToProcess[i].first);
559 DEBUG(UsersToProcess[i].second.dump());
562 SCEVExpander Rewriter(*SE, *LI);
563 BasicBlock *Preheader = L->getLoopPreheader();
564 Instruction *PreInsertPt = Preheader->getTerminator();
565 Instruction *PhiInsertBefore = L->getHeader()->begin();
567 assert(isa<PHINode>(PhiInsertBefore) &&
568 "How could this loop have IV's without any phis?");
569 PHINode *SomeLoopPHI = cast<PHINode>(PhiInsertBefore);
570 assert(SomeLoopPHI->getNumIncomingValues() == 2 &&
571 "This loop isn't canonicalized right");
// The latch is whichever incoming block of the canonical PHI is not the
// preheader (boolean used as the 0/1 incoming index).
572 BasicBlock *LatchBlock =
573 SomeLoopPHI->getIncomingBlock(SomeLoopPHI->getIncomingBlock(0) == Preheader);
575 DEBUG(std::cerr << "INSERTING IVs of STRIDE " << *Stride << ":\n");
577 // FIXME: This loop needs increasing levels of intelligence.
578 // STAGE 0: just emit everything as its own base.
579 // STAGE 1: factor out common vars from bases, and try and push resulting
580 // constants into Imm field. <-- We are here
581 // STAGE 2: factor out large constants to try and make more constants
582 // acceptable for target loads and stores.
584 // Sort by the base value, so that all IVs with identical bases are next to
// BasedUser::operator< is a dummy, so ordering is by the SCEV base only.
586 std::sort(UsersToProcess.begin(), UsersToProcess.end());
587 while (!UsersToProcess.empty()) {
588 SCEVHandle Base = UsersToProcess.front().first;
590 DEBUG(std::cerr << " INSERTING PHI with BASE = " << *Base << ":\n");
592 // Create a new Phi for this base, and stick it in the loop header.
593 const Type *ReplacedTy = Base->getType();
594 PHINode *NewPHI = new PHINode(ReplacedTy, "iv.", PhiInsertBefore);
596 // Emit the initial base value into the loop preheader, and add it to the
598 Value *BaseV = Rewriter.expandCodeFor(Base, PreInsertPt, ReplacedTy);
599 NewPHI->addIncoming(BaseV, Preheader);
601 // Emit the increment of the base value before the terminator of the loop
602 // latch block, and add it to the Phi node.
603 SCEVHandle Inc = SCEVAddExpr::get(SCEVUnknown::get(NewPHI),
604 SCEVUnknown::get(Stride));
606 Value *IncV = Rewriter.expandCodeFor(Inc, LatchBlock->getTerminator(),
608 IncV->setName(NewPHI->getName()+".inc");
609 NewPHI->addIncoming(IncV, LatchBlock);
611 // Emit the code to add the immediate offset to the Phi value, just before
612 // the instructions that we identified as using this stride and base.
// Consume every queued user that shares this base (they are adjacent after
// the sort above).
613 while (!UsersToProcess.empty() && UsersToProcess.front().first == Base) {
614 BasedUser &User = UsersToProcess.front().second;
616 // Clear the SCEVExpander's expression map so that we are guaranteed
617 // to have the code emitted where we expect it.
619 SCEVHandle NewValSCEV = SCEVAddExpr::get(SCEVUnknown::get(NewPHI),
621 Value *Replaced = User.OperandValToReplace;
622 Value *newVal = Rewriter.expandCodeFor(NewValSCEV, User.Inst,
623 Replaced->getType());
625 // Replace the use of the operand Value with the new Phi we just created.
626 User.Inst->replaceUsesOfWith(Replaced, newVal);
627 DEBUG(std::cerr << " CHANGED: IMM =" << *User.Imm << " Inst = "
630 // Mark old value we replaced as possibly dead, so that it is elminated
631 // if we just replaced the last use of that value.
632 DeadInsts.insert(cast<Instruction>(Replaced));
634 UsersToProcess.erase(UsersToProcess.begin());
637 // TODO: Next, find out which base index is the most common, pull it out.
640 // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
641 // different starting values, into different PHIs.
643 // BEFORE writing this, it's probably useful to handle GEP's.
645 // NOTE: pull all constants together, for REG+IMM addressing, include &GV in
646 // 'IMM' if the target supports it.
// runOnLoop - Strength-reduce IV uses in L after first handling all loops
// nested inside it (innermost-first).
650 void LoopStrengthReduce::runOnLoop(Loop *L) {
651 // First step, transform all loops nesting inside of this loop.
652 for (LoopInfo::iterator I = L->begin(), E = L->end(); I != E; ++I)
655 // Next, find all uses of induction variables in this loop, and catagorize
656 // them by stride. Start by finding all of the PHI nodes in the header for
657 // this loop. If they are induction variables, inspect their uses.
658 for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
659 AddUsersIfInteresting(I, L);
661 // If we have nothing to do, return.
662 //if (IVUsesByStride.empty()) return;
664 // FIXME: We can widen subreg IV's here for RISC targets. e.g. instead of
665 // doing computation in byte values, promote to 32-bit values if safe.
667 // FIXME: Attempt to reuse values across multiple IV's. In particular, we
668 // could have something like "for(i) { foo(i*8); bar(i*16) }", which should be
669 // codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC. Need
670 // to be careful that IV's are all the same type. Only works for intptr_t
673 // If we only have one stride, we can more aggressively eliminate some things.
674 bool HasOneStride = IVUsesByStride.size() == 1;
// Reduce each stride class independently.
676 for (std::map<Value*, IVUsersOfOneStride>::iterator SI
677 = IVUsesByStride.begin(), E = IVUsesByStride.end(); SI != E; ++SI)
678 StrengthReduceStridedIVUsers(SI->first, SI->second, L, HasOneStride);
680 // Clean up after ourselves
681 if (!DeadInsts.empty()) {
682 DeleteTriviallyDeadInstructions(DeadInsts);
// Scan the header PHIs for a now-dead canonical induction variable.
// NOTE(review): the declaration of 'PN' is on a line not visible in this
// chunk.
684 BasicBlock::iterator I = L->getHeader()->begin();
686 while ((PN = dyn_cast<PHINode>(I))) {
687 ++I; // Preincrement iterator to avoid invalidating it when deleting PN.
689 // At this point, we know that we have killed one or more GEP instructions.
690 // It is worth checking to see if the cann indvar is also dead, so that we
691 // can remove it as well. The requirements for the cann indvar to be
692 // considered dead are:
693 // 1. the cann indvar has one use
694 // 2. the use is an add instruction
695 // 3. the add has one use
696 // 4. the add is used by the cann indvar
697 // If all four cases above are true, then we can remove both the add and
699 // FIXME: this needs to eliminate an induction variable even if it's being
700 // compared against some value to decide loop termination.
701 if (PN->hasOneUse()) {
702 BinaryOperator *BO = dyn_cast<BinaryOperator>(*(PN->use_begin()));
703 if (BO && BO->hasOneUse()) {
704 if (PN == *(BO->use_begin())) {
// PN and BO only feed each other: queue the add, break the PHI<->add
// cycle with undef, then delete the PHI.
705 DeadInsts.insert(BO);
706 // Break the cycle, then delete the PHI.
707 PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
708 SE->deleteInstructionFromRecords(PN);
709 PN->eraseFromParent();
// Second cleanup sweep picks up the adds queued above.
714 DeleteTriviallyDeadInstructions(DeadInsts);
// Reset per-loop state so the next loop starts fresh.
717 IVUsesByStride.clear();