const ModRefInfo Mode) {
assert(I1.getParent() == I2.getParent() &&
"Instructions not in same basic block!");
- BasicBlock::const_iterator I = &I1;
- BasicBlock::const_iterator E = &I2;
+ BasicBlock::const_iterator I = I1.getIterator();
+ BasicBlock::const_iterator E = I2.getIterator();
++E; // Convert from inclusive to exclusive range.
for (; I != E; ++I) // Check every instruction in range
- if (getModRefInfo(I, Loc) & Mode)
+ if (getModRefInfo(&*I, Loc) & Mode)
return true;
return false;
}
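
The round-trip in this hunk is the core idiom of the whole patch: with the implicit ilist-iterator conversions removed, getIterator() turns a pointer or reference into an iterator, and &*I turns an iterator back into a pointer. A minimal sketch of both directions (the helper and its names are illustrative, not part of the patch):

#include "llvm/IR/BasicBlock.h"
#include <cassert>
#include <iterator>
using namespace llvm;

// Visit every instruction in the inclusive range [From, To].
static void visitInclusiveRange(const Instruction &From,
                                const Instruction &To) {
  assert(From.getParent() == To.getParent() && "Range spans basic blocks");
  BasicBlock::const_iterator I = From.getIterator();          // pointer -> iterator
  BasicBlock::const_iterator E = std::next(To.getIterator()); // exclusive end
  for (; I != E; ++I) {
    const Instruction *Inst = &*I; // iterator -> pointer
    (void)Inst;                    // inspect *Inst here
  }
}
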
SetVector<Value *> Loads;
SetVector<Value *> Stores;
- for (Function::arg_iterator I = F.arg_begin(), E = F.arg_end(); I != E; ++I)
- if (I->getType()->isPointerTy()) // Add all pointer arguments.
- Pointers.insert(I);
+ for (auto &I : F.args())
+ if (I.getType()->isPointerTy()) // Add all pointer arguments.
+ Pointers.insert(&I);
for (inst_iterator I = inst_begin(F), E = inst_end(F); I != E; ++I) {
if (I->getType()->isPointerTy()) // Add all pointer instructions.
if (Cur->Forward || !Cur->aliasesPointer(Ptr, Size, AAInfo, AA)) continue;
if (!FoundSet) { // If this is the first alias set ptr can go into.
- FoundSet = Cur; // Remember it.
+ FoundSet = &*Cur; // Remember it.
} else { // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*Cur, *this); // Merge in contents.
}
if (Cur->Forward || !Cur->aliasesUnknownInst(Inst, AA))
continue;
if (!FoundSet) // If this is the first alias set ptr can go into.
- FoundSet = Cur; // Remember it.
+ FoundSet = &*Cur; // Remember it.
else if (!Cur->Forward) // Otherwise, we must merge the sets.
FoundSet->mergeSetIn(*Cur, *this); // Merge in contents.
}
}
void AliasSetTracker::add(BasicBlock &BB) {
- for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E; ++I)
- add(I);
+ for (auto &I : BB)
+ add(&I);
}
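
Where a whole-container loop is being touched anyway, the patch rewrites it as a range-based for, taking &I whenever a pointer is required. A sketch of that shape, using a hypothetical helper name:

#include "llvm/ADT/SmallVector.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Collect a pointer to every instruction in F. Range-based for yields
// references, so &I supplies the pointer that pointer-based APIs expect.
static void collectInstructionPointers(Function &F,
                                       SmallVectorImpl<Instruction *> &Out) {
  for (BasicBlock &BB : F)    // replaces Function::iterator loops
    for (Instruction &I : BB) // replaces BasicBlock::iterator loops
      Out.push_back(&I);
}
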
void AliasSetTracker::add(const AliasSetTracker &AST) {
// the Values cannot come from different iterations of a potential cycle the
// phi nodes could be involved in.
for (auto *P : VisitedPhiBBs)
- if (isPotentiallyReachable(P->begin(), Inst, DT, LI))
+ if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
return false;
return true;
typedef Function::const_iterator nodes_iterator;
static inline const NodeType *getEntryNode(const BlockFrequencyInfo *G) {
- return G->getFunction()->begin();
+ return &G->getFunction()->front();
}
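
When an API wants the first element as a pointer, &front() now spells out what begin() used to provide through the implicit conversion. An illustrative sketch (the helper is hypothetical):

#include "llvm/IR/Function.h"
using namespace llvm;

// Return the first instruction of F's entry block as a pointer.
static const Instruction *entryFirstInstruction(const Function &F) {
  const BasicBlock *Entry = &F.front(); // was: F.begin()
  return &Entry->front();               // was: Entry->begin()
}
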
static ChildIteratorType child_begin(const NodeType *N) {
return succ_begin(N);
// We print the probabilities from the last function the analysis ran over,
// or the function it is currently running over.
assert(LastF && "Cannot print prior to running over a function");
- for (Function::const_iterator BI = LastF->begin(), BE = LastF->end();
- BI != BE; ++BI) {
- for (succ_const_iterator SI = succ_begin(BI), SE = succ_end(BI);
- SI != SE; ++SI) {
- printEdgeProbability(OS << " ", BI, *SI);
+ for (const auto &BI : *LastF) {
+ for (succ_const_iterator SI = succ_begin(&BI), SE = succ_end(&BI); SI != SE;
+ ++SI) {
+ printEdgeProbability(OS << " ", &BI, *SI);
}
}
}
return true;
// Linear scan, start at 'A', see whether we hit 'B' or the end first.
- for (BasicBlock::const_iterator I = A, E = BB->end(); I != E; ++I) {
+ for (BasicBlock::const_iterator I = A->getIterator(), E = BB->end(); I != E;
+ ++I) {
if (&*I == B)
return true;
}
for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
II != E; ++II) {
// Skip ephemeral values.
- if (EphValues.count(II))
+ if (EphValues.count(&*II))
continue;
// Special handling for calls.
for (Function::iterator B = F->begin(), BE = F->end(); B != BE; ++B) {
for (BasicBlock::iterator it = B->begin(), e = B->end(); it != e; ++it) {
- Instruction *Inst = it;
+ Instruction *Inst = &*it;
unsigned Cost = getInstructionCost(Inst);
if (Cost != (unsigned)-1)
OS << "Cost Model: Found an estimated cost of " << Cost;
for (auto I = IPostDom->begin(); isa<PHINode>(I); ++I) {
// A PHINode is uniform if it returns the same value no matter which path is
// taken.
- if (!cast<PHINode>(I)->hasConstantValue() && DV.insert(I).second)
- Worklist.push_back(I);
+ if (!cast<PHINode>(I)->hasConstantValue() && DV.insert(&*I).second)
+ Worklist.push_back(&*I);
}
// Propagation rule 2: if a value defined in a loop is used outside, the user
// them by stride. Start by finding all of the PHI nodes in the header for
// this loop. If they are induction variables, inspect their uses.
for (BasicBlock::iterator I = L->getHeader()->begin(); isa<PHINode>(I); ++I)
- (void)AddUsersIfInteresting(I);
+ (void)AddUsersIfInteresting(&*I);
return false;
}
continue;
// Skip ephemeral values.
- if (EphValues.count(I))
+ if (EphValues.count(&*I))
continue;
++NumInstructions;
// all of the per-instruction logic. The visit tree returns true if we
// consumed the instruction in any way, and false if the instruction's base
// cost should count against inlining.
- if (Base::visit(I))
+ if (Base::visit(&*I))
++NumInstructionsSimplified;
else
Cost += InlineConstants::InstrCost;
FAI != FAE; ++FAI, ++CAI) {
assert(CAI != CS.arg_end());
if (Constant *C = dyn_cast<Constant>(CAI))
- SimplifiedValues[FAI] = C;
+ SimplifiedValues[&*FAI] = C;
Value *PtrArg = *CAI;
if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
- ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());
+ ConstantOffsetPtrs[&*FAI] = std::make_pair(PtrArg, C->getValue());
// We can SROA any pointer arguments derived from alloca instructions.
if (isa<AllocaInst>(PtrArg)) {
- SROAArgValues[FAI] = PtrArg;
+ SROAArgValues[&*FAI] = PtrArg;
SROAArgCosts[PtrArg] = 0;
}
}
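
The inline-cost side tables are keyed by Value*, so the formal-argument iterator has to be materialized as a pointer with &*FAI before it can serve as a key. A sketch with a hypothetical index map:

#include "llvm/ADT/DenseMap.h"
#include "llvm/IR/Function.h"
using namespace llvm;

// Number each pointer-typed formal argument; &*FAI converts the argument
// iterator into the Argument* used as the map key.
static void indexPointerArgs(Function &F,
                             DenseMap<Value *, unsigned> &Index) {
  unsigned N = 0;
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI)
    if (FAI->getType()->isPointerTy())
      Index[&*FAI] = N++;
}
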
if (isa<IndirectBrInst>(BI->getTerminator()) || BI->hasAddressTaken())
return false;
- for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
- ++II) {
- CallSite CS(II);
+ for (auto &II : *BI) {
+ CallSite CS(&II);
if (!CS)
continue;
for (; AI != AE; ++AI) {
Value *Actual = *AI;
if (PI != PE) {
- Argument *Formal = PI++;
+ Argument *Formal = &*PI++;
Assert(Formal->getType() == Actual->getType(),
"Undefined behavior: Call argument type mismatches "
"callee parameter type",
void Lint::visitUnreachableInst(UnreachableInst &I) {
// This isn't undefined behavior, it's merely suspicious.
- Assert(&I == I.getParent()->begin() ||
- std::prev(BasicBlock::iterator(&I))->mayHaveSideEffects(),
+ Assert(&I == &I.getParent()->front() ||
+ std::prev(I.getIterator())->mayHaveSideEffects(),
"Unusual: unreachable immediately preceded by instruction without "
"side effects",
&I);
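
Neighbor navigation now goes through std::prev/std::next on getIterator() rather than constructing an iterator from a raw pointer. A sketch under the same assumptions as the Lint hunk above (the helper name is invented):

#include "llvm/IR/BasicBlock.h"
#include <iterator>
using namespace llvm;

// Return the instruction immediately before I, or null if I is first.
static Instruction *previousInstruction(Instruction &I) {
  BasicBlock::iterator It = I.getIterator();
  if (It == I.getParent()->begin())
    return nullptr;
  return &*std::prev(It); // step back one, then iterator -> pointer
}
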
// TODO: Look through vector insert/extract/shuffle.
V = OffsetOk ? GetUnderlyingObject(V, *DL) : V->stripPointerCasts();
if (LoadInst *L = dyn_cast<LoadInst>(V)) {
- BasicBlock::iterator BBI = L;
+ BasicBlock::iterator BBI = L->getIterator();
BasicBlock *BB = L->getParent();
SmallPtrSet<BasicBlock *, 4> VisitedBlocks;
for (;;) {
// from/to. If so, the previous load or store would have already trapped,
// so there is no harm doing an extra load (also, CSE will later eliminate
// the load entirely).
- BasicBlock::iterator BBI = ScanFrom, E = ScanFrom->getParent()->begin();
+ BasicBlock::iterator BBI = ScanFrom->getIterator(),
+ E = ScanFrom->getParent()->begin();
// We can at least always strip pointer casts even though we can't use the
// base here.
while (ScanFrom != ScanBB->begin()) {
// We must ignore debug info directives when counting (otherwise they
// would affect codegen).
- Instruction *Inst = --ScanFrom;
+ Instruction *Inst = &*--ScanFrom;
if (isa<DbgInfoIntrinsic>(Inst))
continue;
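
Backward scans predecrement the iterator and then take &* for the pointer, exactly as the two hunks above do. A sketch, assuming the caller passes an iterator anywhere in [BB->begin(), BB->end()]:

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IntrinsicInst.h"
using namespace llvm;

// Return the nearest non-debug instruction strictly before ScanFrom.
static Instruction *previousRealInstruction(BasicBlock::iterator ScanFrom,
                                            BasicBlock *BB) {
  while (ScanFrom != BB->begin()) {
    Instruction *Inst = &*--ScanFrom; // predecrement, then iterator -> pointer
    if (!isa<DbgInfoIntrinsic>(Inst))
      return Inst;
  }
  return nullptr;
}
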
if (it->mayWriteToMemory()) {
StoreInst *St = dyn_cast<StoreInst>(it);
if (!St) {
- emitAnalysis(LoopAccessReport(it) <<
+ emitAnalysis(LoopAccessReport(&*it) <<
"instruction cannot be vectorized");
CanVecMem = false;
return;
// always generate code immediately before the instruction being
// processed, so that the generated code dominates the same BBs
- Instruction *PrevInsertPoint = Builder.GetInsertPoint();
+ BuilderTy::InsertPointGuard Guard(Builder);
if (Instruction *I = dyn_cast<Instruction>(V))
Builder.SetInsertPoint(I);
Result = unknown();
}
- if (PrevInsertPoint)
- Builder.SetInsertPoint(PrevInsertPoint);
-
// Don't reuse CacheIt since it may be invalid at this point.
CacheMap[V] = Result;
return Result;
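
The manual save/restore of the builder's position above is replaced by the RAII InsertPointGuard, which restores the saved insertion point on every exit path, including early returns. A sketch of the pattern (the helper is illustrative):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Temporarily emit code immediately before I; the guard's destructor puts
// the builder back where it was, even if this function returns early.
static void emitBefore(IRBuilder<> &Builder, Instruction *I) {
  IRBuilder<>::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(I);
  // ... create instructions here ...
}
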
// compute offset/size for each PHI incoming pointer
for (unsigned i = 0, e = PHI.getNumIncomingValues(); i != e; ++i) {
- Builder.SetInsertPoint(PHI.getIncomingBlock(i)->getFirstInsertionPt());
+ Builder.SetInsertPoint(&*PHI.getIncomingBlock(i)->getFirstInsertionPt());
SizeOffsetEvalType EdgeData = compute_(PHI.getIncomingValue(i));
if (!bothKnown(EdgeData)) {
if (!Limit)
return MemDepResult::getUnknown();
- Instruction *Inst = --ScanIt;
+ Instruction *Inst = &*--ScanIt;
// If this inst is a memory op, get the pointer it accessed
MemoryLocation Loc;
// Walk backwards through the basic block, looking for dependencies.
while (ScanIt != BB->begin()) {
- Instruction *Inst = --ScanIt;
+ Instruction *Inst = &*--ScanIt;
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
// Debug intrinsics don't (and can't) cause dependencies.
if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(QueryInst))
isLoad |= II->getIntrinsicID() == Intrinsic::lifetime_start;
- LocalCache = getPointerDependencyFrom(MemLoc, isLoad, ScanPos,
- QueryParent, QueryInst);
+ LocalCache = getPointerDependencyFrom(
+ MemLoc, isLoad, ScanPos->getIterator(), QueryParent, QueryInst);
} else if (isa<CallInst>(QueryInst) || isa<InvokeInst>(QueryInst)) {
CallSite QueryCS(QueryInst);
bool isReadOnly = AA->onlyReadsMemory(QueryCS);
- LocalCache = getCallSiteDependencyFrom(QueryCS, isReadOnly, ScanPos,
- QueryParent);
+ LocalCache = getCallSiteDependencyFrom(
+ QueryCS, isReadOnly, ScanPos->getIterator(), QueryParent);
} else
// Non-memory instruction.
LocalCache = MemDepResult::getUnknown();
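
The MemDep entry points now take the scan position as a BasicBlock::iterator, and callers that hold an Instruction* convert explicitly at the call site. A sketch of that calling convention with a hypothetical analysis function:

#include "llvm/IR/BasicBlock.h"
using namespace llvm;

// Count the instructions strictly before the scan position.
static unsigned instructionsBefore(BasicBlock::const_iterator ScanIt,
                                   const BasicBlock *BB) {
  unsigned N = 0;
  for (BasicBlock::const_iterator I = BB->begin(); I != ScanIt; ++I)
    ++N;
  return N;
}

// A caller holding a pointer materializes the iterator explicitly; with the
// implicit conversion gone, the two overloads cannot be confused.
static unsigned instructionsBefore(const Instruction *ScanPos) {
  return instructionsBefore(ScanPos->getIterator(), ScanPos->getParent());
}
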
BasicBlock::iterator ScanPos = DirtyBB->end();
if (ExistingResult) {
if (Instruction *Inst = ExistingResult->getResult().getInst()) {
- ScanPos = Inst;
+ ScanPos = Inst->getIterator();
// We're removing QueryInst's use of Inst.
RemoveFromReverseMap(ReverseNonLocalDeps, Inst,
QueryCS.getInstruction());
assert(ExistingResult->getResult().getInst()->getParent() == BB &&
"Instruction invalidated?");
++NumCacheDirtyNonLocalPtr;
- ScanPos = ExistingResult->getResult().getInst();
+ ScanPos = ExistingResult->getResult().getInst()->getIterator();
// Eliminating the dirty entry from 'Cache', so update the reverse info.
ValueIsLoadPair CacheKey(Loc.Ptr, isLoad);
- RemoveFromReverseMap(ReverseNonLocalPtrDeps, ScanPos, CacheKey);
+ RemoveFromReverseMap(ReverseNonLocalPtrDeps, &*ScanPos, CacheKey);
} else {
++NumUncacheNonLocalPtr;
}
// the entire block to get to this point.
MemDepResult NewDirtyVal;
if (!RemInst->isTerminator())
- NewDirtyVal = MemDepResult::getDirty(++BasicBlock::iterator(RemInst));
+ NewDirtyVal = MemDepResult::getDirty(&*++RemInst->getIterator());
ReverseDepMapType::iterator ReverseDepIt = ReverseLocalDeps.find(RemInst);
if (ReverseDepIt != ReverseLocalDeps.end()) {
.Default(ARCInstKind::CallOrUser);
// One argument.
- const Argument *A0 = AI++;
+ const Argument *A0 = &*AI++;
if (AI == AE)
// Argument is a pointer.
if (PointerType *PTy = dyn_cast<PointerType>(A0->getType())) {
}
// Two arguments, first is i8**.
- const Argument *A1 = AI++;
+ const Argument *A1 = &*AI++;
if (AI == AE)
if (PointerType *PTy = dyn_cast<PointerType>(A0->getType()))
if (PointerType *Pte = dyn_cast<PointerType>(PTy->getElementType()))
// Create a new cast, and leave the old cast in place in case
// it is being used as an insert point. Clear its operand
// so that it doesn't hold anything live.
- Ret = CastInst::Create(Op, V, Ty, "", IP);
+ Ret = CastInst::Create(Op, V, Ty, "", &*IP);
Ret->takeName(CI);
CI->replaceAllUsesWith(Ret);
CI->setOperand(0, UndefValue::get(V->getType()));
// Create a new cast.
if (!Ret)
- Ret = CastInst::Create(Op, V, Ty, V->getName(), IP);
+ Ret = CastInst::Create(Op, V, Ty, V->getName(), &*IP);
// We assert at the end of the function since IP might point to an
// instruction with different dominance properties than a cast
// (an invoke for example) and not dominate BIP (but the cast does).
- assert(SE.DT.dominates(Ret, BIP));
+ assert(SE.DT.dominates(Ret, &*BIP));
rememberInstruction(Ret);
return Ret;
// Insert the cast immediately after the instruction being cast.
Instruction *I = cast<Instruction>(V);
- BasicBlock::iterator IP = I; ++IP;
+ BasicBlock::iterator IP = ++I->getIterator();
if (InvokeInst *II = dyn_cast<InvokeInst>(I))
IP = II->getNormalDest()->begin();
if (CatchPadInst *CPI = dyn_cast<CatchPadInst>(I))
ScanLimit++;
if (IP->getOpcode() == (unsigned)Opcode && IP->getOperand(0) == LHS &&
IP->getOperand(1) == RHS)
- return IP;
+ return &*IP;
if (IP == BlockBegin) break;
}
}
if (!Preheader) break;
// Ok, move up a level.
- Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+ Builder.SetInsertPoint(Preheader->getTerminator());
}
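
The old two-argument SetInsertPoint(BB, Inst) form relied on the implicit Instruction*-to-iterator conversion for its second argument; the single-argument overload takes the instruction directly and infers the block from it. Sketch:

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Position the builder immediately before BB's terminator.
static void positionBeforeTerminator(IRBuilder<> &Builder, BasicBlock *BB) {
  Builder.SetInsertPoint(BB->getTerminator());
}
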
// If we haven't found this binop, insert it.
Type::getInt8PtrTy(Ty->getContext(), PTy->getAddressSpace()));
assert(!isa<Instruction>(V) ||
- SE.DT.dominates(cast<Instruction>(V), Builder.GetInsertPoint()));
+ SE.DT.dominates(cast<Instruction>(V), &*Builder.GetInsertPoint()));
// Expand the operands for a plain byte offset.
Value *Idx = expandCodeFor(SE.getAddExpr(Ops), Ty);
ScanLimit++;
if (IP->getOpcode() == Instruction::GetElementPtr &&
IP->getOperand(0) == V && IP->getOperand(1) == Idx)
- return IP;
+ return &*IP;
if (IP == BlockBegin) break;
}
}
if (!Preheader) break;
// Ok, move up a level.
- Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+ Builder.SetInsertPoint(Preheader->getTerminator());
}
// Emit a GEP.
if (!Preheader) break;
// Ok, move up a level.
- Builder.SetInsertPoint(Preheader, Preheader->getTerminator());
+ Builder.SetInsertPoint(Preheader->getTerminator());
}
// Insert a pretty getelementptr. Note that this GEP is not marked inbounds,
PostIncLoops.clear();
// Expand code for the start value.
- Value *StartV = expandCodeFor(Normalized->getStart(), ExpandTy,
- L->getHeader()->begin());
+ Value *StartV =
+ expandCodeFor(Normalized->getStart(), ExpandTy, &L->getHeader()->front());
// StartV must be hoisted into L's preheader to dominate the new phi.
assert(!isa<Instruction>(StartV) ||
if (useSubtract)
Step = SE.getNegativeSCEV(Step);
// Expand the step somewhere that dominates the loop header.
- Value *StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+ Value *StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
// The no-wrap behavior proved by IsIncrement(NUW|NSW) is only applicable if
// we actually do emit an addition. It does not apply if we emit a
// expandCodeFor with an InsertPoint that is either outside the PostIncLoop
// or dominated by IVIncInsertPos.
if (isa<Instruction>(Result) &&
- !SE.DT.dominates(cast<Instruction>(Result), Builder.GetInsertPoint())) {
+ !SE.DT.dominates(cast<Instruction>(Result),
+ &*Builder.GetInsertPoint())) {
// The induction variable's postinc expansion does not dominate this use.
// IVUsers tries to prevent this case, so it is rare. However, it can
// happen when an IVUser outside the loop is not dominated by the latch
{
// Expand the step somewhere that dominates the loop header.
BuilderType::InsertPointGuard Guard(Builder);
- StepV = expandCodeFor(Step, IntTy, L->getHeader()->begin());
+ StepV = expandCodeFor(Step, IntTy, &L->getHeader()->front());
}
Result = expandIVInc(PN, StepV, L, ExpandTy, IntTy, useSubtract);
}
isa<LandingPadInst>(NewInsertPt))
++NewInsertPt;
V = expandCodeFor(SE.getTruncateExpr(SE.getUnknown(V), Ty), nullptr,
- NewInsertPt);
+ &*NewInsertPt);
return V;
}
BasicBlock *Header = L->getHeader();
pred_iterator HPB = pred_begin(Header), HPE = pred_end(Header);
CanonicalIV = PHINode::Create(Ty, std::distance(HPB, HPE), "indvar",
- Header->begin());
+ &Header->front());
rememberInstruction(CanonicalIV);
SmallSet<BasicBlock *, 4> PredSeen;
Value *SCEVExpander::expandCodeFor(const SCEV *SH, Type *Ty,
Instruction *IP) {
- Builder.SetInsertPoint(IP->getParent(), IP);
+ assert(IP);
+ Builder.SetInsertPoint(IP);
return expandCodeFor(SH, Ty);
}
Value *SCEVExpander::expand(const SCEV *S) {
// Compute an insertion point for this SCEV object. Hoist the instructions
// as far out in the loop nest as possible.
- Instruction *InsertPt = Builder.GetInsertPoint();
+ Instruction *InsertPt = &*Builder.GetInsertPoint();
for (Loop *L = SE.LI.getLoopFor(Builder.GetInsertBlock());;
L = L->getParentLoop())
if (SE.isLoopInvariant(S, L)) {
// LSR sets the insertion point for AddRec start/step values to the
// block start to simplify value reuse, even though it's an invalid
// position. SCEVExpander must correct for this in all cases.
- InsertPt = L->getHeader()->getFirstInsertionPt();
+ InsertPt = &*L->getHeader()->getFirstInsertionPt();
}
} else {
// If the SCEV is computable at this level, insert it into the header
// after the PHIs (and after any other instructions that we've inserted
// there) so that it is guaranteed to dominate any user inside the loop.
if (L && SE.hasComputableLoopEvolution(S, L) && !PostIncLoops.count(L))
- InsertPt = L->getHeader()->getFirstInsertionPt();
+ InsertPt = &*L->getHeader()->getFirstInsertionPt();
while (InsertPt != Builder.GetInsertPoint()
&& (isInsertedInstruction(InsertPt)
|| isa<DbgInfoIntrinsic>(InsertPt))) {
- InsertPt = std::next(BasicBlock::iterator(InsertPt));
+ InsertPt = &*std::next(InsertPt->getIterator());
}
break;
}
return I->second;
BuilderType::InsertPointGuard Guard(Builder);
- Builder.SetInsertPoint(InsertPt->getParent(), InsertPt);
+ Builder.SetInsertPoint(InsertPt);
// Expand the expression into instructions.
Value *V = visit(S);
// Emit code for it.
BuilderType::InsertPointGuard Guard(Builder);
- PHINode *V = cast<PHINode>(expandCodeFor(H, nullptr,
- L->getHeader()->begin()));
+ PHINode *V =
+ cast<PHINode>(expandCodeFor(H, nullptr, &L->getHeader()->front()));
return V;
}
if (OrigInc->getType() != IsomorphicInc->getType()) {
Instruction *IP = nullptr;
if (PHINode *PN = dyn_cast<PHINode>(OrigInc))
- IP = PN->getParent()->getFirstInsertionPt();
+ IP = &*PN->getParent()->getFirstInsertionPt();
else
IP = OrigInc->getNextNode();
++NumElim;
Value *NewIV = OrigPhiRef;
if (OrigPhiRef->getType() != Phi->getType()) {
- IRBuilder<> Builder(L->getHeader()->getFirstInsertionPt());
+ IRBuilder<> Builder(&*L->getHeader()->getFirstInsertionPt());
Builder.SetCurrentDebugLocation(Phi->getDebugLoc());
NewIV = Builder.CreateTruncOrBitCast(OrigPhiRef, Phi->getType(), IVName);
}
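
getFirstInsertionPt() returns an iterator under this scheme, so code that needs an Instruction*, such as this IRBuilder constructor, dereferences it with &*. A sketch, assuming BB is well formed (its terminator guarantees a valid insertion point):

#include "llvm/IR/IRBuilder.h"
using namespace llvm;

// Build at the first valid insertion point: after any PHIs and, in a
// landing pad block, after the landingpad instruction.
static void emitAtFirstInsertionPt(BasicBlock *BB) {
  IRBuilder<> Builder(&*BB->getFirstInsertionPt());
  // ... create instructions with Builder ...
  (void)Builder;
}
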
SmallVector<const SCEV *, 8> Operands;
const Loop *L = AR->getLoop();
// The addrec conceptually uses its operands at loop entry.
- Instruction *LUser = L->getHeader()->begin();
+ Instruction *LUser = &L->getHeader()->front();
// Transform each operand.
for (SCEVNAryExpr::op_iterator I = AR->op_begin(), E = AR->op_end();
I != E; ++I) {
void SparseSolver::Print(Function &F, raw_ostream &OS) const {
OS << "\nFUNCTION: " << F.getName() << "\n";
- for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
- if (!BBExecutable.count(BB))
+ for (auto &BB : F) {
+ if (!BBExecutable.count(&BB))
OS << "INFEASIBLE: ";
OS << "\t";
- if (BB->hasName())
- OS << BB->getName() << ":\n";
+ if (BB.hasName())
+ OS << BB.getName() << ":\n";
else
OS << "; anon bb\n";
- for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
- LatticeFunc->PrintValue(getLatticeState(I), OS);
- OS << *I << "\n";
+ for (auto &I : BB) {
+ LatticeFunc->PrintValue(getLatticeState(&I), OS);
+ OS << I << "\n";
}
OS << "\n";
for (BasicBlock::const_iterator I =
std::next(BasicBlock::const_iterator(Q.CxtI)),
IE(Inv); I != IE; ++I)
- if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+ if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
return false;
return !isEphemeralValueOf(Inv, Q.CxtI);
// of the block); the common case is that the assume will come first.
for (BasicBlock::iterator I = std::next(BasicBlock::iterator(Inv)),
IE = Inv->getParent()->end(); I != IE; ++I)
- if (I == Q.CxtI)
+ if (&*I == Q.CxtI)
return true;
// The context must come first...
for (BasicBlock::const_iterator I =
std::next(BasicBlock::const_iterator(Q.CxtI)),
IE(Inv); I != IE; ++I)
- if (!isSafeToSpeculativelyExecute(I) && !isAssumeLikeIntrinsic(I))
+ if (!isSafeToSpeculativelyExecute(&*I) && !isAssumeLikeIntrinsic(&*I))
return false;
return !isEphemeralValueOf(Inv, Q.CxtI);
SmallSet<const Value *, 16> YieldsPoison;
YieldsPoison.insert(PoisonI);
- for (const Instruction *I = PoisonI, *E = BB->end(); I != E;
- I = I->getNextNode()) {
- if (I != PoisonI) {
- const Value *NotPoison = getGuaranteedNonFullPoisonOp(I);
+ for (BasicBlock::const_iterator I = PoisonI->getIterator(), E = BB->end();
+ I != E; ++I) {
+ if (&*I != PoisonI) {
+ const Value *NotPoison = getGuaranteedNonFullPoisonOp(&*I);
if (NotPoison != nullptr && YieldsPoison.count(NotPoison)) return true;
- if (!isGuaranteedToTransferExecutionToSuccessor(I)) return false;
+ if (!isGuaranteedToTransferExecutionToSuccessor(&*I))
+ return false;
}
// Mark poison that propagates from I through uses of I.
- if (YieldsPoison.count(I)) {
+ if (YieldsPoison.count(&*I)) {
for (const User *User : I->users()) {
const Instruction *UserI = cast<Instruction>(User);
if (UserI->getParent() == BB && propagatesFullPoison(UserI))