1 //===- LoopStrengthReduce.cpp - Strength Reduce GEPs in Loops -------------===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Nate Begeman and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This pass performs a strength reduction on array references inside loops that
11 // have as one or more of their components the loop induction variable. This is
12 // accomplished by creating a new Value to hold the initial value of the array
13 // access for the first iteration, and then creating a new GEP instruction in
14 // the loop to increment the value by the appropriate amount.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "loop-reduce"
19 #include "llvm/Transforms/Scalar.h"
20 #include "llvm/Constants.h"
21 #include "llvm/Instructions.h"
22 #include "llvm/Type.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/Analysis/Dominators.h"
25 #include "llvm/Analysis/LoopInfo.h"
26 #include "llvm/Analysis/ScalarEvolutionExpander.h"
27 #include "llvm/Support/CFG.h"
28 #include "llvm/Support/GetElementPtrTypeIterator.h"
29 #include "llvm/Transforms/Utils/Local.h"
30 #include "llvm/Target/TargetData.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/Support/Debug.h"
// Pass statistic, reported under the "loop-reduce" name with -stats.
// NOTE(review): the increment site for this counter is not visible in this
// chunk of the file.
38 Statistic<> NumReduced ("loop-reduce", "Number of GEPs strength reduced");
40 /// IVStrideUse - Keep track of one use of a strided induction variable, where
41 /// the stride is stored externally. The Offset member keeps track of the
42 /// offset from the IV, User is the actual user of the operand, and 'Operand'
43 /// is the operand # of the User that is the use.
// NOTE(review): the struct header line and the Offset/User member
// declarations fall on lines elided from this view; only the remaining
// member and the constructor are shown below.
47 Value *OperandValToReplace;
// Construct a use record from the loop-invariant offset, the using
// instruction, and the operand value that will later be replaced.
49 IVStrideUse(const SCEVHandle &Offs, Instruction *U, Value *O)
50 : Offset(Offs), User(U), OperandValToReplace(O) {}
53 /// IVUsersOfOneStride - This structure keeps track of all instructions that
54 /// have an operand that is based on the trip count multiplied by some stride.
55 /// The stride for all of these users is common and kept external to this
// (the remainder of this doc comment falls on a line elided from this view)
57 struct IVUsersOfOneStride {
58 /// Users - Keep track of all of the users of this stride as well as the
59 /// initial value and the operand that uses the IV.
60 std::vector<IVStrideUse> Users;
// addUser - Record one more user of this common stride: the offset from
// the IV, the using instruction, and the operand value to replace.
62 void addUser(const SCEVHandle &Offset,Instruction *User, Value *Operand) {
63 Users.push_back(IVStrideUse(Offset, User, Operand));
// LoopStrengthReduce - FunctionPass that strength-reduces strided IV uses
// in each loop of the function. NOTE(review): several lines of this class
// (e.g. the LI/DS/SE/TD analysis-pointer members and various closing
// braces) are elided from this view of the file.
68 class LoopStrengthReduce : public FunctionPass {
// UIntPtrTy - Cached target intptr type (set from TargetData in
// runOnFunction); pointer arithmetic is canonicalized to this type.
73 const Type *UIntPtrTy;
76 /// MaxTargetAMSize - This is the maximum power-of-two scale value that the
77 /// target can handle for free with its addressing modes.
78 unsigned MaxTargetAMSize;
80 /// IVUsesByStride - Keep track of all uses of induction variables that we
81 /// are interested in. The key of the map is the stride of the access.
82 std::map<Value*, IVUsersOfOneStride> IVUsesByStride;
84 /// CastedValues - As we need to cast values to uintptr_t, this keeps track
85 /// of the casted version of each value. This is accessed by
86 /// getCastedVersionOf.
87 std::map<Value*, Value*> CastedPointers;
89 /// DeadInsts - Keep track of instructions we may have made dead, so that
90 /// we can remove them after we are done working.
91 std::set<Instruction*> DeadInsts;
// Construct the pass; MTAMS is the target's maximum free addressing-mode
// scale (defaults to 1, i.e. no free scaling).
93 LoopStrengthReduce(unsigned MTAMS = 1)
94 : MaxTargetAMSize(MTAMS) {
// runOnFunction - Cache the required analyses, then visit each top-level
// loop (runOnLoop recurses into nested loops first).
97 virtual bool runOnFunction(Function &) {
98 LI = &getAnalysis<LoopInfo>();
99 DS = &getAnalysis<DominatorSet>();
100 SE = &getAnalysis<ScalarEvolution>();
101 TD = &getAnalysis<TargetData>();
102 UIntPtrTy = TD->getIntPtrType();
105 for (LoopInfo::iterator I = LI->begin(), E = LI->end(); I != E; ++I)
// Drop the per-function cast cache so stale Value*s don't leak across runs.
108 CastedPointers.clear();
// getAnalysisUsage - This pass requires canonical (simplified) loops and
// preserves the CFG shape.
112 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
113 AU.setPreservesCFG();
114 AU.addRequiredID(LoopSimplifyID);
115 AU.addRequired<LoopInfo>();
116 AU.addRequired<DominatorSet>();
117 AU.addRequired<TargetData>();
118 AU.addRequired<ScalarEvolution>();
121 /// getCastedVersionOf - Return the specified value casted to uintptr_t.
123 Value *getCastedVersionOf(Value *V);
// Per-loop driver; defined out of line below.
125 void runOnLoop(Loop *L);
126 bool AddUsersIfInteresting(Instruction *I, Loop *L,
127 std::set<Instruction*> &Processed);
128 SCEVHandle GetExpressionSCEV(Instruction *E, Loop *L);
131 void StrengthReduceStridedIVUsers(Value *Stride, IVUsersOfOneStride &Uses,
132 Loop *L, bool isOnlyStride);
133 void DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts);
// Register the pass with the optimizer under the "loop-reduce" name.
135 RegisterOpt<LoopStrengthReduce> X("loop-reduce",
136 "Strength Reduce GEP Uses of Ind. Vars");
// createLoopStrengthReducePass - Public factory; MaxTargetAMSize is the
// largest power-of-two scale the target's addressing modes handle for free.
139 FunctionPass *llvm::createLoopStrengthReducePass(unsigned MaxTargetAMSize) {
140 return new LoopStrengthReduce(MaxTargetAMSize);
143 /// getCastedVersionOf - Return the specified value casted to uintptr_t.
// Constants are folded via ConstantExpr; non-constants get a CastInst
// inserted at a point dominating all uses, memoized in CastedPointers.
145 Value *LoopStrengthReduce::getCastedVersionOf(Value *V) {
146 if (V->getType() == UIntPtrTy) return V;
147 if (Constant *CB = dyn_cast<Constant>(V))
148 return ConstantExpr::getCast(CB, UIntPtrTy);
// Memoization slot; NOTE(review): the early return for an already-cached
// entry presumably sits on a line elided from this view — confirm.
150 Value *&New = CastedPointers[V];
// Choose an insertion point just after V's definition.
153 BasicBlock::iterator InsertPt;
154 if (Argument *Arg = dyn_cast<Argument>(V)) {
155 // Insert into the entry of the function, after any allocas.
156 InsertPt = Arg->getParent()->begin()->begin();
157 while (isa<AllocaInst>(InsertPt)) ++InsertPt;
// An invoke's result is only available on the normal-destination edge.
159 if (InvokeInst *II = dyn_cast<InvokeInst>(V)) {
160 InsertPt = II->getNormalDest()->begin();
// Otherwise insert right at the defining instruction (elided 'else' branch).
162 InsertPt = cast<Instruction>(V);
166 // Do not insert casts into the middle of PHI node blocks.
167 while (isa<PHINode>(InsertPt)) ++InsertPt;
// Build the cast and conservatively mark it possibly-dead; it is cleaned
// up at the end of the pass if nothing ends up using it.
170 New = new CastInst(V, UIntPtrTy, V->getName(), InsertPt);
171 DeadInsts.insert(cast<Instruction>(New));
176 /// DeleteTriviallyDeadInstructions - If any of the instructions is the
177 /// specified set are trivially dead, delete them and see if this makes any of
178 /// their operands subsequently dead.
179 void LoopStrengthReduce::
180 DeleteTriviallyDeadInstructions(std::set<Instruction*> &Insts) {
// Worklist loop: pop one instruction at a time; deleting it may make its
// operands dead, which are re-queued for reconsideration.
181 while (!Insts.empty()) {
182 Instruction *I = *Insts.begin();
183 Insts.erase(Insts.begin());
184 if (isInstructionTriviallyDead(I)) {
185 for (unsigned i = 0, e = I->getNumOperands(); i != e; ++i)
// NOTE(review): the body re-queueing operand U into Insts falls on a
// line elided from this view — confirm against the full file.
186 if (Instruction *U = dyn_cast<Instruction>(I->getOperand(i)))
// Keep ScalarEvolution's cache consistent before erasing the instruction.
188 SE->deleteInstructionFromRecords(I);
189 I->eraseFromParent();
196 /// GetExpressionSCEV - Compute and return the SCEV for the specified
// instruction. GEPs are decomposed manually into pointer + scaled index
// arithmetic in the target's intptr type; everything else defers to SE.
198 SCEVHandle LoopStrengthReduce::GetExpressionSCEV(Instruction *Exp, Loop *L) {
199 GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Exp);
// Non-GEP (elided guard above): let ScalarEvolution analyze it directly.
201 return SE->getSCEV(Exp);
203 // Analyze all of the subscripts of this getelementptr instruction, looking
204 // for uses that are determined by the trip count of L. First, skip all
205 // operands that are not dependent on the IV.
207 // Build up the base expression. Insert an LLVM cast of the pointer to
209 SCEVHandle GEPVal = SCEVUnknown::get(getCastedVersionOf(GEP->getOperand(0)));
211 gep_type_iterator GTI = gep_type_begin(GEP);
// Walk each GEP index, accumulating byte offsets into GEPVal.
213 for (unsigned i = 1, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
214 // If this is a use of a recurrence that we can analyze, and it comes before
215 // Op does in the GEP operand list, we will handle this when we process this
// Struct field: add the constant member offset from the target layout.
217 if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
218 const StructLayout *SL = TD->getStructLayout(STy);
219 unsigned Idx = cast<ConstantUInt>(GEP->getOperand(i))->getValue();
220 uint64_t Offset = SL->MemberOffsets[Idx];
221 GEPVal = SCEVAddExpr::get(GEPVal,
222 SCEVUnknown::getIntegerSCEV(Offset, UIntPtrTy));
// Array/sequential index: scale the (casted) index by the element size.
224 Value *OpVal = getCastedVersionOf(GEP->getOperand(i));
225 SCEVHandle Idx = SE->getSCEV(OpVal);
227 uint64_t TypeSize = TD->getTypeSize(GTI.getIndexedType());
229 Idx = SCEVMulExpr::get(Idx,
230 SCEVConstant::get(ConstantUInt::get(UIntPtrTy,
232 GEPVal = SCEVAddExpr::get(GEPVal, Idx);
239 /// getSCEVStartAndStride - Compute the start and stride of this expression,
240 /// returning false if the expression is not a start/stride pair, or true if it
241 /// is. The stride must be a loop invariant expression, but the start may be
242 /// a mix of loop invariant and loop variant expressions.
243 static bool getSCEVStartAndStride(const SCEVHandle &SH, Loop *L,
244 SCEVHandle &Start, Value *&Stride) {
// Start arrives pre-initialized to the zero SCEV by the caller, so copying
// it here seeds TheAddRec with zero.
245 SCEVHandle TheAddRec = Start; // Initialize to zero.
247 // If the outer level is an AddExpr, the operands are all start values except
248 // for a nested AddRecExpr.
249 if (SCEVAddExpr *AE = dyn_cast<SCEVAddExpr>(SH)) {
250 for (unsigned i = 0, e = AE->getNumOperands(); i != e; ++i)
251 if (SCEVAddRecExpr *AddRec =
252 dyn_cast<SCEVAddRecExpr>(AE->getOperand(i))) {
253 if (AddRec->getLoop() == L)
254 TheAddRec = SCEVAddExpr::get(AddRec, TheAddRec);
// (elided 'else' on the line above this return)
256 return false; // Nested IV of some sort?
// Loop-invariant operand: fold it into the start expression.
258 Start = SCEVAddExpr::get(Start, AE->getOperand(i));
261 } else if (SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(SH)) {
// (elided handling of the bare-addrec case, plus the fallthrough below)
264 return false; // not analyzable.
// By now TheAddRec must be a single affine recurrence over this loop.
267 SCEVAddRecExpr *AddRec = dyn_cast<SCEVAddRecExpr>(TheAddRec);
268 if (!AddRec || AddRec->getLoop() != L) return false;
270 // FIXME: Generalize to non-affine IV's.
271 if (!AddRec->isAffine()) return false;
// Operand 0 of an affine addrec is its start; operand 1 is its stride.
273 Start = SCEVAddExpr::get(Start, AddRec->getOperand(0));
275 // FIXME: generalize to IV's with more complex strides (must emit stride
276 // expression outside of loop!)
277 if (!isa<SCEVConstant>(AddRec->getOperand(1)))
280 SCEVConstant *StrideC = cast<SCEVConstant>(AddRec->getOperand(1));
281 Stride = StrideC->getValue();
283 assert(Stride->getType()->isUnsigned() &&
284 "Constants should be canonicalized to unsigned!");
288 /// AddUsersIfInteresting - Inspect the specified instruction. If it is a
289 /// reducible SCEV, recursively add its users to the IVUsesByStride set and
290 /// return true. Otherwise, return false.
291 bool LoopStrengthReduce::AddUsersIfInteresting(Instruction *I, Loop *L,
292 std::set<Instruction*> &Processed) {
// Void-typed instructions produce no value, hence have no reducible uses.
293 if (I->getType() == Type::VoidTy) return false;
294 if (!Processed.insert(I).second)
295 return true; // Instruction already handled.
297 // Get the symbolic expression for this instruction.
298 SCEVHandle ISE = GetExpressionSCEV(I, L);
299 if (isa<SCEVCouldNotCompute>(ISE)) return false;
301 // Get the start and stride for this expression.
302 SCEVHandle Start = SCEVUnknown::getIntegerSCEV(0, ISE->getType());
// NOTE(review): the declaration of Stride falls on a line elided from
// this view; it is filled in by getSCEVStartAndStride below.
304 if (!getSCEVStartAndStride(ISE, L, Start, Stride))
305 return false; // Non-reducible symbolic expression, bail out.
// Walk all users of I; any user we cannot itself reduce becomes a
// recorded IV use keyed by this stride.
307 for (Value::use_iterator UI = I->use_begin(), E = I->use_end(); UI != E;++UI){
308 Instruction *User = cast<Instruction>(*UI);
310 // Do not infinitely recurse on PHI nodes.
311 if (isa<PHINode>(User) && User->getParent() == L->getHeader())
314 // If this is an instruction defined in a nested loop, or outside this loop,
315 // don't recurse into it.
316 bool AddUserToIVUsers = false;
317 if (LI->getLoopFor(User->getParent()) != L) {
318 DEBUG(std::cerr << "FOUND USER in nested loop: " << *User
319 << " OF SCEV: " << *ISE << "\n");
320 AddUserToIVUsers = true;
321 } else if (!AddUsersIfInteresting(User, L, Processed)) {
322 DEBUG(std::cerr << "FOUND USER: " << *User
323 << " OF SCEV: " << *ISE << "\n");
324 AddUserToIVUsers = true;
327 if (AddUserToIVUsers) {
328 // Okay, we found a user that we cannot reduce. Analyze the instruction
329 // and decide what to do with it.
330 IVUsesByStride[Stride].addUser(Start, User, I);
337 /// BasedUser - For a particular base value, keep information about how we've
338 /// partitioned the expression so far.
// NOTE(review): the struct header line and the Inst/Imm/EmittedBase member
// declarations fall on lines elided from this view.
340 /// Inst - The instruction using the induction variable.
343 /// OperandValToReplace - The operand value of Inst to replace with the
345 Value *OperandValToReplace;
347 /// Imm - The immediate value that should be added to the base immediately
348 /// before Inst, because it will be folded into the imm field of the
352 /// EmittedBase - The actual value* to use for the base value of this
353 /// operation. This is null if we should just use zero so far.
356 BasedUser(Instruction *I, Value *Op, const SCEVHandle &IMM)
357 : Inst(I), OperandValToReplace(Op), Imm(IMM), EmittedBase(0) {}
360 // No need to compare these.
// Always-false comparator: when sorting pair<SCEVHandle, BasedUser>, only
// the first (base) component determines the order.
361 bool operator<(const BasedUser &BU) const { return 0; }
// dump - Print this BasedUser's immediate, (optional) emitted base, and
// using instruction to stderr, for debugging.
367 void BasedUser::dump() const {
368 std::cerr << " Imm=" << *Imm;
// (elided guard: EmittedBase may be null, per the member's doc comment)
370 std::cerr << " EB=" << *EmittedBase;
372 std::cerr << " Inst: " << *Inst;
375 /// isTargetConstant - Return true if the following can be referenced by the
376 /// immediate field of a target instruction.
377 static bool isTargetConstant(const SCEVHandle &V) {
379 // FIXME: Look at the target to decide if &GV is a legal constant immediate.
380 if (isa<SCEVConstant>(V)) return true;
// Deliberate early-out: everything below is dead code kept for when the
// global-address-as-immediate path is enabled for targets like x86.
382 return false; // ENABLE this for x86
384 if (SCEVUnknown *SU = dyn_cast<SCEVUnknown>(V))
385 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(SU->getValue()))
386 if (CE->getOpcode() == Instruction::Cast)
387 if (isa<GlobalValue>(CE->getOperand(0)))
388 // FIXME: should check to see that the dest is uintptr_t!
393 /// GetImmediateValues - Look at Val, and pull out any additions of constants
394 /// that can fit into the immediate field of instructions in the target.
// Returns the SCEV of everything that must stay in the immediate field;
// the caller subtracts this from Val to obtain the shared base.
395 static SCEVHandle GetImmediateValues(SCEVHandle Val, bool isAddress, Loop *L) {
396 if (isAddress && isTargetConstant(Val))
399 if (SCEVAddExpr *SAE = dyn_cast<SCEVAddExpr>(Val)) {
// Accumulate the foldable/loop-variant operands of the add into Imm.
401 SCEVHandle Imm = SCEVUnknown::getIntegerSCEV(0, Val->getType());
// NOTE(review): i's declaration falls on a line elided from this view.
403 for (; i != SAE->getNumOperands(); ++i)
404 if (isAddress && isTargetConstant(SAE->getOperand(i))) {
405 Imm = SCEVAddExpr::get(Imm, SAE->getOperand(i));
406 } else if (!SAE->getOperand(i)->isLoopInvariant(L)) {
407 // If this is a loop-variant expression, it must stay in the immediate
408 // field of the expression.
409 Imm = SCEVAddExpr::get(Imm, SAE->getOperand(i));
413 } else if (SCEVAddRecExpr *SARE = dyn_cast<SCEVAddRecExpr>(Val)) {
414 // Try to pull immediates out of the start value of nested addrec's.
415 return GetImmediateValues(SARE->getStart(), isAddress, L);
418 if (!Val->isLoopInvariant(L)) {
419 // If this is a loop-variant expression, it must stay in the immediate
420 // field of the expression.
// Default: nothing needs to stay in the immediate field.
424 return SCEVUnknown::getIntegerSCEV(0, Val->getType());
427 /// StrengthReduceStridedIVUsers - Strength reduce all of the users of a single
428 /// stride of IV. All of the users may have different starting values, and this
429 /// may not be the only stride (we know it is if isOnlyStride is true).
430 void LoopStrengthReduce::StrengthReduceStridedIVUsers(Value *Stride,
431 IVUsersOfOneStride &Uses,
434 // Transform our list of users and offsets to a bit more complex table. In
435 // this new vector, the first entry for each element is the base of the
436 // strided access, and the second is the BasedUser object for the use. We
437 // progressively move information from the first to the second entry, until we
438 // eventually emit the object.
439 std::vector<std::pair<SCEVHandle, BasedUser> > UsersToProcess;
440 UsersToProcess.reserve(Uses.Users.size());
442 SCEVHandle ZeroBase = SCEVUnknown::getIntegerSCEV(0,
443 Uses.Users[0].Offset->getType());
445 for (unsigned i = 0, e = Uses.Users.size(); i != e; ++i)
446 UsersToProcess.push_back(std::make_pair(Uses.Users[i].Offset,
447 BasedUser(Uses.Users[i].User,
448 Uses.Users[i].OperandValToReplace,
451 // First pass, figure out what we can represent in the immediate fields of
452 // instructions. If we can represent anything there, move it to the imm
453 // fields of the BasedUsers.
454 for (unsigned i = 0, e = UsersToProcess.size(); i != e; ++i) {
455 // Addressing modes can be folded into loads and stores. Be careful that
456 // the store is through the expression, not of the expression though.
457 bool isAddress = isa<LoadInst>(UsersToProcess[i].second.Inst);
458 if (StoreInst *SI = dyn_cast<StoreInst>(UsersToProcess[i].second.Inst))
459 if (SI->getOperand(1) == UsersToProcess[i].second.OperandValToReplace)
// Split Offset into (Base, Imm): the immediate part folds into the
// instruction's addressing mode, the rest becomes the shared base.
462 UsersToProcess[i].second.Imm =
463 GetImmediateValues(UsersToProcess[i].first, isAddress, L);
465 UsersToProcess[i].first = SCEV::getMinusSCEV(UsersToProcess[i].first,
466 UsersToProcess[i].second.Imm);
468 DEBUG(std::cerr << "BASE: " << *UsersToProcess[i].first);
469 DEBUG(UsersToProcess[i].second.dump());
// Set up code-emission machinery and locate the loop's key blocks.
472 SCEVExpander Rewriter(*SE, *LI);
473 BasicBlock *Preheader = L->getLoopPreheader();
474 Instruction *PreInsertPt = Preheader->getTerminator();
475 Instruction *PhiInsertBefore = L->getHeader()->begin();
477 assert(isa<PHINode>(PhiInsertBefore) &&
478 "How could this loop have IV's without any phis?");
479 PHINode *SomeLoopPHI = cast<PHINode>(PhiInsertBefore);
480 assert(SomeLoopPHI->getNumIncomingValues() == 2 &&
481 "This loop isn't canonicalized right");
// Whichever incoming block is not the preheader is the latch.
482 BasicBlock *LatchBlock =
483 SomeLoopPHI->getIncomingBlock(SomeLoopPHI->getIncomingBlock(0) == Preheader);
485 DEBUG(std::cerr << "INSERTING IVs of STRIDE " << *Stride << ":\n");
487 // FIXME: This loop needs increasing levels of intelligence.
488 // STAGE 0: just emit everything as its own base.
489 // STAGE 1: factor out common vars from bases, and try and push resulting
490 // constants into Imm field. <-- We are here
491 // STAGE 2: factor out large constants to try and make more constants
492 // acceptable for target loads and stores.
494 // Sort by the base value, so that all IVs with identical bases are next to
496 std::sort(UsersToProcess.begin(), UsersToProcess.end());
// Emit one new PHI per distinct base, then rewrite every user sharing it.
497 while (!UsersToProcess.empty()) {
498 SCEVHandle Base = UsersToProcess.front().first;
500 DEBUG(std::cerr << " INSERTING PHI with BASE = " << *Base << ":\n");
502 // Create a new Phi for this base, and stick it in the loop header.
503 const Type *ReplacedTy = Base->getType();
504 PHINode *NewPHI = new PHINode(ReplacedTy, "iv.", PhiInsertBefore);
506 // Emit the initial base value into the loop preheader, and add it to the
508 Value *BaseV = Rewriter.expandCodeFor(Base, PreInsertPt, ReplacedTy);
509 NewPHI->addIncoming(BaseV, Preheader);
511 // Emit the increment of the base value before the terminator of the loop
512 // latch block, and add it to the Phi node.
513 SCEVHandle Inc = SCEVAddExpr::get(SCEVUnknown::get(NewPHI),
514 SCEVUnknown::get(Stride));
516 Value *IncV = Rewriter.expandCodeFor(Inc, LatchBlock->getTerminator(),
518 IncV->setName(NewPHI->getName()+".inc");
519 NewPHI->addIncoming(IncV, LatchBlock);
521 // Emit the code to add the immediate offset to the Phi value, just before
522 // the instructions that we identified as using this stride and base.
523 while (!UsersToProcess.empty() && UsersToProcess.front().first == Base) {
524 BasedUser &User = UsersToProcess.front().second;
526 // Clear the SCEVExpander's expression map so that we are guaranteed
527 // to have the code emitted where we expect it.
529 SCEVHandle NewValSCEV = SCEVAddExpr::get(SCEVUnknown::get(NewPHI),
531 Value *Replaced = User.OperandValToReplace;
532 Value *newVal = Rewriter.expandCodeFor(NewValSCEV, User.Inst,
533 Replaced->getType());
535 // Replace the use of the operand Value with the new Phi we just created.
536 User.Inst->replaceUsesOfWith(Replaced, newVal);
537 DEBUG(std::cerr << " CHANGED: IMM =" << *User.Imm << " Inst = "
540 // Mark old value we replaced as possibly dead, so that it is eliminated
541 // if we just replaced the last use of that value.
542 DeadInsts.insert(cast<Instruction>(Replaced));
544 UsersToProcess.erase(UsersToProcess.begin());
547 // TODO: Next, find out which base index is the most common, pull it out.
550 // IMPORTANT TODO: Figure out how to partition the IV's with this stride, but
551 // different starting values, into different PHIs.
553 // BEFORE writing this, it's probably useful to handle GEP's.
555 // NOTE: pull all constants together, for REG+IMM addressing, include &GV in
556 // 'IMM' if the target supports it.
// runOnLoop - Per-loop driver: recurse into nested loops first, gather IV
// uses by stride, strength-reduce each stride group, then clean up dead
// instructions (including, when possible, the canonical induction variable).
560 void LoopStrengthReduce::runOnLoop(Loop *L) {
561 // First step, transform all loops nesting inside of this loop.
562 for (LoopInfo::iterator I = L->begin(), E = L->end(); I != E; ++I)
565 // Next, find all uses of induction variables in this loop, and categorize
566 // them by stride. Start by finding all of the PHI nodes in the header for
567 // this loop. If they are induction variables, inspect their uses.
568 std::set<Instruction*> Processed; // Don't reprocess instructions.
569 for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
570 AddUsersIfInteresting(I, L, Processed);
572 // If we have nothing to do, return.
// (early-exit intentionally disabled — kept for reference)
573 //if (IVUsesByStride.empty()) return;
575 // FIXME: We can widen subreg IV's here for RISC targets. e.g. instead of
576 // doing computation in byte values, promote to 32-bit values if safe.
578 // FIXME: Attempt to reuse values across multiple IV's. In particular, we
579 // could have something like "for(i) { foo(i*8); bar(i*16) }", which should be
580 // codegened as "for (j = 0;; j+=8) { foo(j); bar(j+j); }" on X86/PPC. Need
581 // to be careful that IV's are all the same type. Only works for intptr_t
584 // If we only have one stride, we can more aggressively eliminate some things.
585 bool HasOneStride = IVUsesByStride.size() == 1;
587 for (std::map<Value*, IVUsersOfOneStride>::iterator SI
588 = IVUsesByStride.begin(), E = IVUsesByStride.end(); SI != E; ++SI)
589 StrengthReduceStridedIVUsers(SI->first, SI->second, L, HasOneStride);
591 // Clean up after ourselves
592 if (!DeadInsts.empty()) {
593 DeleteTriviallyDeadInstructions(DeadInsts);
// Scan header PHIs for a now-dead canonical induction variable.
// NOTE(review): PN's declaration falls on a line elided from this view.
595 BasicBlock::iterator I = L->getHeader()->begin();
597 while ((PN = dyn_cast<PHINode>(I))) {
598 ++I; // Preincrement iterator to avoid invalidating it when deleting PN.
600 // At this point, we know that we have killed one or more GEP instructions.
601 // It is worth checking to see if the cann (canonical) indvar is also dead,
602 // so that we can remove it as well. The requirements for the cann indvar
603 // to be considered dead are:
604 // 1. the cann indvar has one use
605 // 2. the use is an add instruction
606 // 3. the add has one use
607 // 4. the add is used by the cann indvar
608 // If all four cases above are true, then we can remove both the add and
610 // FIXME: this needs to eliminate an induction variable even if it's being
611 // compared against some value to decide loop termination.
612 if (PN->hasOneUse()) {
613 BinaryOperator *BO = dyn_cast<BinaryOperator>(*(PN->use_begin()));
614 if (BO && BO->hasOneUse()) {
615 if (PN == *(BO->use_begin())) {
616 DeadInsts.insert(BO);
617 // Break the cycle, then delete the PHI.
618 PN->replaceAllUsesWith(UndefValue::get(PN->getType()));
619 SE->deleteInstructionFromRecords(PN);
620 PN->eraseFromParent();
// Delete anything (e.g. the add) made newly dead by removing the PHI.
625 DeleteTriviallyDeadInstructions(DeadInsts);
// Reset per-loop state so the next loop starts from a clean slate.
628 IVUsesByStride.clear();