From b886fde9afa62670efac756f82cfcee682ea5250 Mon Sep 17 00:00:00 2001
From: "Duncan P. N. Exon Smith"
Date: Tue, 13 Oct 2015 19:26:58 +0000
Subject: [PATCH] Scalar: Remove remaining ilist iterator implicit conversions

Remove remaining `ilist_iterator` implicit conversions from
LLVMScalarOpts.

This change exposed some scary behaviour in lib/Transforms/Scalar/SCCP.cpp
around line 1770.  This patch changes a call from `Function::begin()` to
`&Function::front()`, since the return was immediately being passed into
another function that takes a `BasicBlock*`.  `Function::front()` started
to assert, since the function was empty.  Note that `Function::end()` does
not point at a legal `BasicBlock*` -- it points at an `ilist_half_node` --
so the other function was getting garbage before.  (I added the missing
check for `Function::isDeclaration()`.)

Otherwise, no functionality change intended.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@250211 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Transforms/Scalar/LICM.cpp                   |  10 +-
 lib/Transforms/Scalar/LoopIdiomRecognize.cpp     |  14 +-
 lib/Transforms/Scalar/LoopInstSimplify.cpp       |   2 +-
 lib/Transforms/Scalar/LoopInterchange.cpp        |   6 +-
 lib/Transforms/Scalar/LoopRerollPass.cpp         |  10 +-
 lib/Transforms/Scalar/LoopRotation.cpp           |  11 +-
 lib/Transforms/Scalar/LoopStrengthReduce.cpp     |  41 ++---
 lib/Transforms/Scalar/LoopUnswitch.cpp           |  20 +--
 lib/Transforms/Scalar/LowerAtomic.cpp            |   6 +-
 lib/Transforms/Scalar/MemCpyOptimizer.cpp        |  38 ++---
 .../Scalar/MergedLoadStoreMotion.cpp             |  11 +-
 lib/Transforms/Scalar/NaryReassociate.cpp        |  18 +--
 .../Scalar/PartiallyInlineLibCalls.cpp           |   2 +-
 lib/Transforms/Scalar/PlaceSafepoints.cpp        |   6 +-
 lib/Transforms/Scalar/Reassociate.cpp            |  15 +-
 lib/Transforms/Scalar/Reg2Mem.cpp                |   9 +-
 .../Scalar/RewriteStatepointsForGC.cpp           |  23 ++-
 lib/Transforms/Scalar/SCCP.cpp                   | 140 +++++++++---------
 lib/Transforms/Scalar/SROA.cpp                   |  14 +-
 .../Scalar/ScalarReplAggregates.cpp              |   6 +-
 lib/Transforms/Scalar/Scalarizer.cpp             |  27 ++--
 .../Scalar/SeparateConstOffsetFromGEP.cpp        |   2 +-
 lib/Transforms/Scalar/SimplifyCFGPass.cpp        |   9 +-
 lib/Transforms/Scalar/Sink.cpp                   |   4 +-
 .../Scalar/SpeculativeExecution.cpp              |   2 +-
 lib/Transforms/Scalar/StructurizeCFG.cpp         |   4 +-
 .../Scalar/TailRecursionElimination.cpp          |  30 ++--
 27 files changed, 240 insertions(+), 240 deletions(-)
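Background for the hunks that follow: `ilist_iterator` no longer converts
implicitly to the element pointer (`Instruction*`, `BasicBlock*`, ...), so
the whole patch is the same three mechanical spellings applied everywhere.
A minimal sketch of the idioms, illustrative only and not part of the patch
(`Example` is a made-up function; it assumes a non-empty block):

    #include "llvm/IR/BasicBlock.h"
    #include "llvm/IR/Instruction.h"
    using namespace llvm;

    void Example(BasicBlock *BB) {
      // 1) Iterator to pointer: dereference, then take the address.
      Instruction *First = &*BB->begin(); // was: Instruction *First = BB->begin();
      // 2) Pointer back to iterator: ask the ilist node itself.
      BasicBlock::iterator It = First->getIterator();
      // 3) Whole-object form: &BB->front() instead of BB->begin().  Unlike the
      //    removed implicit conversion, front() asserts on an empty block
      //    instead of handing back a pointer at the ilist sentinel, which is
      //    exactly the latent SCCP bug described above.
      Instruction *Same = &BB->front();
      (void)It; (void)Same;
    }
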
diff --git a/lib/Transforms/Scalar/LICM.cpp b/lib/Transforms/Scalar/LICM.cpp
index 2348d83be09..ba39afd4287 100644
--- a/lib/Transforms/Scalar/LICM.cpp
+++ b/lib/Transforms/Scalar/LICM.cpp
@@ -587,7 +587,7 @@ static Instruction *CloneInstructionInExitBlock(const Instruction &I,
       if (!OLoop->contains(&PN)) {
         PHINode *OpPN =
             PHINode::Create(OInst->getType(), PN.getNumIncomingValues(),
-                            OInst->getName() + ".lcssa", ExitBlock.begin());
+                            OInst->getName() + ".lcssa", &ExitBlock.front());
         for (unsigned i = 0, e = PN.getNumIncomingValues(); i != e; ++i)
           OpPN->addIncoming(OInst, PN.getIncomingBlock(i));
         *OI = OpPN;
@@ -751,9 +751,9 @@ namespace {
       if (!L->contains(BB)) {
         // We need to create an LCSSA PHI node for the incoming value and
         // store that.
-        PHINode *PN = PHINode::Create(
-            I->getType(), PredCache.size(BB),
-            I->getName() + ".lcssa", BB->begin());
+        PHINode *PN =
+            PHINode::Create(I->getType(), PredCache.size(BB),
+                            I->getName() + ".lcssa", &BB->front());
         for (BasicBlock *Pred : PredCache.get(BB))
           PN->addIncoming(I, Pred);
         return PN;
@@ -963,7 +963,7 @@ bool llvm::promoteLoopAccessesToScalars(AliasSet &AS,
     CurLoop->getUniqueExitBlocks(ExitBlocks);
     InsertPts.resize(ExitBlocks.size());
     for (unsigned i = 0, e = ExitBlocks.size(); i != e; ++i)
-      InsertPts[i] = ExitBlocks[i]->getFirstInsertionPt();
+      InsertPts[i] = &*ExitBlocks[i]->getFirstInsertionPt();
   }

   // We use the SSAUpdater interface to insert phi nodes as required.
diff --git a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
index f5b61edd5cc..f757297dc96 100644
--- a/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
+++ b/lib/Transforms/Scalar/LoopIdiomRecognize.cpp
@@ -258,10 +258,10 @@ bool LoopIdiomRecognize::runOnLoopBlock(
   bool MadeChange = false;
   for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
-    Instruction *Inst = I++;
+    Instruction *Inst = &*I++;
     // Look for store instructions, which may be optimized to memset/memcpy.
     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
-      WeakVH InstPtr(I);
+      WeakVH InstPtr(&*I);
       if (!processLoopStore(SI, BECount))
         continue;
       MadeChange = true;
@@ -275,7 +275,7 @@ bool LoopIdiomRecognize::runOnLoopBlock(
     // Look for memset instructions, which may be optimized to a larger memset.
     if (MemSetInst *MSI = dyn_cast<MemSetInst>(Inst)) {
-      WeakVH InstPtr(I);
+      WeakVH InstPtr(&*I);
       if (!processLoopMemSet(MSI, BECount))
         continue;
       MadeChange = true;
@@ -416,7 +416,7 @@ static bool mayLoopAccessLocation(Value *Ptr, ModRefInfo Access, Loop *L,
   for (Loop::block_iterator BI = L->block_begin(), E = L->block_end(); BI != E;
        ++BI)
     for (BasicBlock::iterator I = (*BI)->begin(), E = (*BI)->end(); I != E; ++I)
-      if (&*I != IgnoredStore && (AA.getModRefInfo(I, StoreLoc) & Access))
+      if (&*I != IgnoredStore && (AA.getModRefInfo(&*I, StoreLoc) & Access))
         return true;

   return false;
@@ -777,10 +777,10 @@ static bool detectPopcountIdiom(Loop *CurLoop, BasicBlock *PreCondBB,
   // step 4: Find the instruction which count the population: cnt2 = cnt1 + 1
   {
     CountInst = nullptr;
-    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI(),
+    for (BasicBlock::iterator Iter = LoopEntry->getFirstNonPHI()->getIterator(),
                               IterE = LoopEntry->end();
          Iter != IterE; Iter++) {
-      Instruction *Inst = Iter;
+      Instruction *Inst = &*Iter;
       if (Inst->getOpcode() != Instruction::Add)
         continue;
@@ -972,7 +972,7 @@ void LoopIdiomRecognize::transformLoopToPopcount(BasicBlock *PreCondBB,
   ICmpInst *LbCond = cast<ICmpInst>(LbBr->getCondition());
   Type *Ty = TripCnt->getType();

-  PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", Body->begin());
+  PHINode *TcPhi = PHINode::Create(Ty, 2, "tcphi", &Body->front());

   Builder.SetInsertPoint(LbCond);
   Instruction *TcDec = cast<Instruction>(
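The `&*I++` spelling in runOnLoopBlock is doing two jobs: `I++` advances the
iterator before the body can erase the current instruction, and `&*` makes
the old implicit conversion explicit. The `WeakVH` guards the *next*
instruction in case processing deletes it as well. A hedged sketch of the
combined pattern (`process` stands in for `processLoopStore`/`processLoopMemSet`):

    bool runOnBlock(BasicBlock *BB) {
      bool MadeChange = false;
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
        Instruction *Inst = &*I++;  // advance first; process() may erase Inst
        if (Inst->isTerminator())
          break;                    // nothing of interest past the terminator
        WeakVH NextInst(&*I);       // nulled out if *I is deleted as a side effect
        MadeChange |= process(Inst);
        if (!NextInst)              // the iterator died with its instruction:
          I = BB->begin();          // rescan from the top rather than use it
      }
      return MadeChange;
    }
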
diff --git a/lib/Transforms/Scalar/LoopInstSimplify.cpp b/lib/Transforms/Scalar/LoopInstSimplify.cpp
index 39c7738ca9c..b4102fe9ba3 100644
--- a/lib/Transforms/Scalar/LoopInstSimplify.cpp
+++ b/lib/Transforms/Scalar/LoopInstSimplify.cpp
@@ -112,7 +112,7 @@ bool LoopInstSimplify::runOnLoop(Loop *L, LPPassManager &LPM) {
       // Simplify instructions in the current basic block.
       for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
-        Instruction *I = BI++;
+        Instruction *I = &*BI++;

         // The first time through the loop ToSimplify is empty and we try to
         // simplify all instructions. On later iterations ToSimplify is not
         // empty and we only bother simplifying instructions that are in it.
diff --git a/lib/Transforms/Scalar/LoopInterchange.cpp b/lib/Transforms/Scalar/LoopInterchange.cpp
index e296b710a6f..8f703006e12 100644
--- a/lib/Transforms/Scalar/LoopInterchange.cpp
+++ b/lib/Transforms/Scalar/LoopInterchange.cpp
@@ -99,7 +99,7 @@ static bool populateDependencyMatrix(CharMatrix &DepMatrix, unsigned Level,
         return false;
       if (St && !St->isSimple())
         return false;
-      MemInstr.push_back(I);
+      MemInstr.push_back(&*I);
     }
   }
@@ -1113,8 +1113,8 @@ static void moveBBContents(BasicBlock *FromBB, Instruction *InsertBefore) {
   auto &ToList = InsertBefore->getParent()->getInstList();
   auto &FromList = FromBB->getInstList();

-  ToList.splice(InsertBefore, FromList, FromList.begin(),
-                FromBB->getTerminator());
+  ToList.splice(InsertBefore->getIterator(), FromList, FromList.begin(),
+                FromBB->getTerminator()->getIterator());
 }

 void LoopInterchangeTransform::adjustOuterLoopPreheader() {
diff --git a/lib/Transforms/Scalar/LoopRerollPass.cpp b/lib/Transforms/Scalar/LoopRerollPass.cpp
index 282071364dc..9b295b6d161 100644
--- a/lib/Transforms/Scalar/LoopRerollPass.cpp
+++ b/lib/Transforms/Scalar/LoopRerollPass.cpp
@@ -484,7 +484,7 @@ void LoopReroll::collectPossibleIVs(Loop *L,
       continue;

     if (const SCEVAddRecExpr *PHISCEV =
-            dyn_cast<SCEVAddRecExpr>(SE->getSCEV(I))) {
+            dyn_cast<SCEVAddRecExpr>(SE->getSCEV(&*I))) {
       if (PHISCEV->getLoop() != L)
         continue;
       if (!PHISCEV->isAffine())
@@ -494,10 +494,10 @@ void LoopReroll::collectPossibleIVs(Loop *L,
         const APInt &AInt = IncSCEV->getValue()->getValue().abs();
         if (IncSCEV->getValue()->isZero() || AInt.uge(MaxInc))
           continue;
-        IVToIncMap[I] = IncSCEV->getValue()->getSExtValue();
+        IVToIncMap[&*I] = IncSCEV->getValue()->getSExtValue();
         DEBUG(dbgs() << "LRR: Possible IV: " << *I << " = " << *PHISCEV << "\n");
-        PossibleIVs.push_back(I);
+        PossibleIVs.push_back(&*I);
       }
     }
   }
@@ -558,7 +558,7 @@ void LoopReroll::collectPossibleReductions(Loop *L,
     if (!I->getType()->isSingleValueType())
       continue;

-    SimpleLoopReduction SLR(I, L);
+    SimpleLoopReduction SLR(&*I, L);
     if (!SLR.valid())
       continue;
@@ -1297,7 +1297,7 @@ void LoopReroll::DAGRootTracker::replace(const SCEV *IterCount) {
                           SCEV::FlagAnyWrap));
     { // Limit the lifetime of SCEVExpander.
       SCEVExpander Expander(*SE, DL, "reroll");
-      Value *NewIV = Expander.expandCodeFor(H, IV->getType(), Header->begin());
+      Value *NewIV = Expander.expandCodeFor(H, IV->getType(), &Header->front());

       for (auto &KV : Uses) {
         if (KV.second.find_first() == 0)
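The `splice` changes in LoopInterchange (and LoopUnswitch further down)
follow one mechanical rule: `iplist::splice` takes iterators for both the
position and the range ends, so `Instruction*` values must be converted with
`getIterator()` before the call. The shape of `moveBBContents` above, as a
standalone sketch:

    // Move everything except the terminator of FromBB in front of InsertBefore.
    void moveContents(BasicBlock *FromBB, Instruction *InsertBefore) {
      auto &ToList = InsertBefore->getParent()->getInstList();
      auto &FromList = FromBB->getInstList();
      ToList.splice(InsertBefore->getIterator(), FromList, FromList.begin(),
                    FromBB->getTerminator()->getIterator());
    }
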
diff --git a/lib/Transforms/Scalar/LoopRotation.cpp b/lib/Transforms/Scalar/LoopRotation.cpp
index aeb5b36b4b8..631c6ff5b0b 100644
--- a/lib/Transforms/Scalar/LoopRotation.cpp
+++ b/lib/Transforms/Scalar/LoopRotation.cpp
@@ -158,7 +158,7 @@ static void RewriteUsesOfClonedInstructions(BasicBlock *OrigHeader,
   // as necessary.
   SSAUpdater SSA;
   for (I = OrigHeader->begin(); I != E; ++I) {
-    Value *OrigHeaderVal = I;
+    Value *OrigHeaderVal = &*I;

     // If there are no uses of the value (e.g. because it returns void), there
     // is nothing to rewrite.
@@ -221,7 +221,7 @@ static bool shouldSpeculateInstrs(BasicBlock::iterator Begin,
   for (BasicBlock::iterator I = Begin; I != End; ++I) {
-    if (!isSafeToSpeculativelyExecute(I))
+    if (!isSafeToSpeculativelyExecute(&*I))
       return false;

     if (isa<DbgInfoIntrinsic>(I))
@@ -301,14 +301,15 @@ bool LoopRotate::simplifyLoopLatch(Loop *L) {
   if (!BI)
     return false;

-  if (!shouldSpeculateInstrs(Latch->begin(), Jmp, L))
+  if (!shouldSpeculateInstrs(Latch->begin(), Jmp->getIterator(), L))
     return false;

   DEBUG(dbgs() << "Folding loop latch " << Latch->getName() << " into "
                << LastExit->getName() << "\n");

   // Hoist the instructions from Latch into LastExit.
-  LastExit->getInstList().splice(BI, Latch->getInstList(), Latch->begin(), Jmp);
+  LastExit->getInstList().splice(BI->getIterator(), Latch->getInstList(),
+                                 Latch->begin(), Jmp->getIterator());

   unsigned FallThruPath = BI->getSuccessor(0) == Latch ? 0 : 1;
   BasicBlock *Header = Jmp->getSuccessor(0);
@@ -431,7 +432,7 @@ bool LoopRotate::rotateLoop(Loop *L, bool SimplifiedLatch) {
   // possible or create a clone in the OldPreHeader if not.
   TerminatorInst *LoopEntryBranch = OrigPreheader->getTerminator();
   while (I != E) {
-    Instruction *Inst = I++;
+    Instruction *Inst = &*I++;

     // If the instruction's operands are invariant and it doesn't read or write
     // memory, then it is safe to hoist.  Doing this doesn't change the order of
diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
index 4cb1955cafb..7a7d0359b17 100644
--- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp
+++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp
@@ -2191,7 +2191,7 @@ LSRInstance::OptimizeLoopTermCond() {
       ICmpInst *OldCond = Cond;
       Cond = cast<ICmpInst>(Cond->clone());
       Cond->setName(L->getHeader()->getName() + ".termcond");
-      ExitingBlock->getInstList().insert(TermBr, Cond);
+      ExitingBlock->getInstList().insert(TermBr->getIterator(), Cond);

       // Clone the IVUse, as the old use still exists!
       CondUse = &IU.AddUser(Cond, CondUse->getOperandValToReplace());
@@ -2765,19 +2765,19 @@ void LSRInstance::CollectChains() {
     for (BasicBlock::iterator I = (*BBIter)->begin(), E = (*BBIter)->end();
          I != E; ++I) {
       // Skip instructions that weren't seen by IVUsers analysis.
-      if (isa<PHINode>(I) || !IU.isIVUserOrOperand(I))
+      if (isa<PHINode>(I) || !IU.isIVUserOrOperand(&*I))
         continue;

       // Ignore users that are part of a SCEV expression. This way we only
       // consider leaf IV Users. This effectively rediscovers a portion of
       // IVUsers analysis but in program order this time.
-      if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(I)))
+      if (SE.isSCEVable(I->getType()) && !isa<SCEVUnknown>(SE.getSCEV(&*I)))
         continue;

       // Remove this instruction from any NearUsers set it may be in.
       for (unsigned ChainIdx = 0, NChains = IVChainVec.size();
            ChainIdx < NChains; ++ChainIdx) {
-        ChainUsersVec[ChainIdx].NearUsers.erase(I);
+        ChainUsersVec[ChainIdx].NearUsers.erase(&*I);
       }
       // Search for operands that can be chained.
       SmallPtrSet<Instruction *, 4> UniqueOperands;
@@ -2786,7 +2786,7 @@ void LSRInstance::CollectChains() {
       while (IVOpIter != IVOpEnd) {
         Instruction *IVOpInst = cast<Instruction>(*IVOpIter);
         if (UniqueOperands.insert(IVOpInst).second)
-          ChainInstruction(I, IVOpInst, ChainUsersVec);
+          ChainInstruction(&*I, IVOpInst, ChainUsersVec);
         IVOpIter = findIVOperand(std::next(IVOpIter), IVOpEnd, L, SE);
       }
     } // Continue walking down the instructions.
@@ -4368,14 +4368,14 @@ LSRInstance::HoistInsertPosition(BasicBlock::iterator IP,
       // instead of at the end, so that it can be used for other expansions.
       if (IDom == Inst->getParent() &&
          (!BetterPos || !DT.dominates(Inst, BetterPos)))
-        BetterPos = std::next(BasicBlock::iterator(Inst));
+        BetterPos = &*std::next(BasicBlock::iterator(Inst));
     }
     if (!AllDominate)
       break;
     if (BetterPos)
-      IP = BetterPos;
+      IP = BetterPos->getIterator();
     else
-      IP = Tentative;
+      IP = Tentative->getIterator();
   }

   return IP;
@@ -4440,7 +4440,8 @@ LSRInstance::AdjustInsertPositionForExpand(BasicBlock::iterator LowestIP,
   // Set IP below instructions recently inserted by SCEVExpander. This keeps the
   // IP consistent across expansions and allows the previously inserted
   // instructions to be reused by subsequent expansion.
-  while (Rewriter.isInsertedInstruction(IP) && IP != LowestIP) ++IP;
+  while (Rewriter.isInsertedInstruction(&*IP) && IP != LowestIP)
+    ++IP;

   return IP;
 }
@@ -4490,7 +4491,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
                                  LF.UserInst, LF.OperandValToReplace,
                                  Loops, SE, DT);

-    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr, IP)));
+    Ops.push_back(SE.getUnknown(Rewriter.expandCodeFor(Reg, nullptr, &*IP)));
   }

   // Expand the ScaledReg portion.
@@ -4508,14 +4509,14 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
       // Expand ScaleReg as if it was part of the base regs.
       if (F.Scale == 1)
         Ops.push_back(
-            SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP)));
+            SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, &*IP)));
       else {
         // An interesting way of "folding" with an icmp is to use a negated
         // scale, which we'll implement by inserting it into the other operand
         // of the icmp.
         assert(F.Scale == -1 &&
                "The only scale supported by ICmpZero uses is -1!");
-        ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr, IP);
+        ICmpScaledV = Rewriter.expandCodeFor(ScaledS, nullptr, &*IP);
       }
     } else {
       // Otherwise just expand the scaled register and an explicit scale,
@@ -4525,11 +4526,11 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
       // Unless the addressing mode will not be folded.
       if (!Ops.empty() && LU.Kind == LSRUse::Address &&
           isAMCompletelyFolded(TTI, LU, F)) {
-        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
+        Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, &*IP);
         Ops.clear();
         Ops.push_back(SE.getUnknown(FullV));
       }
-      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, IP));
+      ScaledS = SE.getUnknown(Rewriter.expandCodeFor(ScaledS, nullptr, &*IP));
       if (F.Scale != 1)
         ScaledS =
             SE.getMulExpr(ScaledS, SE.getConstant(ScaledS->getType(), F.Scale));
@@ -4541,7 +4542,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   if (F.BaseGV) {
     // Flush the operand list to suppress SCEVExpander hoisting.
     if (!Ops.empty()) {
-      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
+      Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, &*IP);
       Ops.clear();
       Ops.push_back(SE.getUnknown(FullV));
     }
@@ -4551,7 +4552,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   // Flush the operand list to suppress SCEVExpander hoisting of both folded and
   // unfolded offsets. LSR assumes they both live next to their uses.
   if (!Ops.empty()) {
-    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, IP);
+    Value *FullV = Rewriter.expandCodeFor(SE.getAddExpr(Ops), Ty, &*IP);
     Ops.clear();
    Ops.push_back(SE.getUnknown(FullV));
   }
@@ -4587,7 +4588,7 @@ Value *LSRInstance::Expand(const LSRFixup &LF,
   const SCEV *FullS = Ops.empty() ?
                       SE.getConstant(IntTy, 0) :
                       SE.getAddExpr(Ops);
-  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, IP);
+  Value *FullV = Rewriter.expandCodeFor(FullS, Ty, &*IP);

   // We're done expanding now, so reset the rewriter.
   Rewriter.clearPostInc();
@@ -4687,7 +4688,8 @@ void LSRInstance::RewriteForPHI(PHINode *PN,
       if (!Pair.second)
         PN->setIncomingValue(i, Pair.first->second);
       else {
-        Value *FullV = Expand(LF, F, BB->getTerminator(), Rewriter, DeadInsts);
+        Value *FullV = Expand(LF, F, BB->getTerminator()->getIterator(),
+                              Rewriter, DeadInsts);

         // If this is reuse-by-noop-cast, insert the noop cast.
         Type *OpTy = LF.OperandValToReplace->getType();
@@ -4717,7 +4719,8 @@ void LSRInstance::Rewrite(const LSRFixup &LF,
   if (PHINode *PN = dyn_cast<PHINode>(LF.UserInst)) {
     RewriteForPHI(PN, LF, F, Rewriter, DeadInsts, P);
   } else {
-    Value *FullV = Expand(LF, F, LF.UserInst, Rewriter, DeadInsts);
+    Value *FullV =
+        Expand(LF, F, LF.UserInst->getIterator(), Rewriter, DeadInsts);

     // If this is reuse-by-noop-cast, insert the noop cast.
     Type *OpTy = LF.OperandValToReplace->getType();
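A pattern worth calling out in the LSR hunks: `Expand` and
`AdjustInsertPositionForExpand` keep the insert position as a
`BasicBlock::iterator` throughout and convert only at the edges. Callers
come in through `getIterator()`, and `SCEVExpander::expandCodeFor`, which
still wants an `Instruction*`, gets `&*IP`. A sketch of that calling
convention (the wrapper is hypothetical; the `SCEVExpander` call is the
real API):

    Value *expandBefore(SCEVExpander &Rewriter, const SCEV *S, Type *Ty,
                        Instruction *User) {
      BasicBlock::iterator IP = User->getIterator(); // position as an iterator
      // ... the position can be nudged here with ++IP / --IP as LSR does ...
      return Rewriter.expandCodeFor(S, Ty, &*IP);    // pointer only at the edge
    }
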
diff --git a/lib/Transforms/Scalar/LoopUnswitch.cpp b/lib/Transforms/Scalar/LoopUnswitch.cpp
index 0f7ce7eb052..d55fad0f1fa 100644
--- a/lib/Transforms/Scalar/LoopUnswitch.cpp
+++ b/lib/Transforms/Scalar/LoopUnswitch.cpp
@@ -782,7 +782,7 @@ void LoopUnswitch::UnswitchTrivialCondition(Loop *L, Value *Cond, Constant *Val,
   // without actually branching to it (the exit block should be dominated by the
   // loop header, not the preheader).
   assert(!L->contains(ExitBlock) && "Exit block is in the loop?");
-  BasicBlock *NewExit = SplitBlock(ExitBlock, ExitBlock->begin(), DT, LI);
+  BasicBlock *NewExit = SplitBlock(ExitBlock, &ExitBlock->front(), DT, LI);

   // Okay, now we have a position to branch from and a position to branch to,
   // insert the new conditional branch.
@@ -839,8 +839,8 @@ bool LoopUnswitch::TryTrivialLoopUnswitch(bool &Changed) {
     // Check if this loop will execute any side-effecting instructions (e.g.
     // stores, calls, volatile loads) in the part of the loop that the code
     // *would* execute. Check the header first.
-    for (BasicBlock::iterator I : *CurrentBB)
-      if (I->mayHaveSideEffects())
+    for (Instruction &I : *CurrentBB)
+      if (I.mayHaveSideEffects())
         return false;

     // FIXME: add check for constant foldable switch instructions.
@@ -1019,8 +1019,9 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,

   // Splice the newly inserted blocks into the function right before the
   // original preheader.
-  F->getBasicBlockList().splice(NewPreheader, F->getBasicBlockList(),
-                                NewBlocks[0], F->end());
+  F->getBasicBlockList().splice(NewPreheader->getIterator(),
+                                F->getBasicBlockList(),
+                                NewBlocks[0]->getIterator(), F->end());

   // FIXME: We could register any cloned assumptions instead of clearing the
   // whole function's cache.
@@ -1062,7 +1063,7 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,

   if (LandingPadInst *LPad = NewExit->getLandingPadInst()) {
     PHINode *PN = PHINode::Create(LPad->getType(), 0, "",
-                                  ExitSucc->getFirstInsertionPt());
+                                  &*ExitSucc->getFirstInsertionPt());

     for (pred_iterator I = pred_begin(ExitSucc), E = pred_end(ExitSucc);
          I != E; ++I) {
@@ -1078,7 +1079,8 @@ void LoopUnswitch::UnswitchNontrivialCondition(Value *LIC, Constant *Val,
   for (unsigned i = 0, e = NewBlocks.size(); i != e; ++i)
     for (BasicBlock::iterator I = NewBlocks[i]->begin(),
          E = NewBlocks[i]->end(); I != E; ++I)
-      RemapInstruction(I, VMap,RF_NoModuleLevelChanges|RF_IgnoreMissingEntries);
+      RemapInstruction(&*I, VMap,
+                       RF_NoModuleLevelChanges | RF_IgnoreMissingEntries);

   // Rewrite the original preheader to select between versions of the loop.
   BranchInst *OldBR = cast<BranchInst>(loopPreheader->getTerminator());
@@ -1322,8 +1324,8 @@ void LoopUnswitch::SimplifyCode(std::vector<Instruction *> &Worklist, Loop *L) {
       Succ->replaceAllUsesWith(Pred);

       // Move all of the successor contents from Succ to Pred.
-      Pred->getInstList().splice(BI, Succ->getInstList(), Succ->begin(),
-                                 Succ->end());
+      Pred->getInstList().splice(BI->getIterator(), Succ->getInstList(),
+                                 Succ->begin(), Succ->end());
       LPM->deleteSimpleAnalysisValue(BI, L);
       BI->eraseFromParent();
       RemoveFromWorklist(BI, Worklist);
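Where a loop only inspects instructions, the patch sidesteps the
iterator-versus-pointer question entirely by switching to a range-based loop
over `Instruction&`, as in `TryTrivialLoopUnswitch` above. For instance:

    static bool hasSideEffects(BasicBlock &BB) {
      for (Instruction &I : BB)   // no iterator-to-pointer conversion needed
        if (I.mayHaveSideEffects())
          return true;
      return false;
    }
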
diff --git a/lib/Transforms/Scalar/LowerAtomic.cpp b/lib/Transforms/Scalar/LowerAtomic.cpp
index 3314e1ed41a..41511bcb7b0 100644
--- a/lib/Transforms/Scalar/LowerAtomic.cpp
+++ b/lib/Transforms/Scalar/LowerAtomic.cpp
@@ -22,7 +22,7 @@ using namespace llvm;
 #define DEBUG_TYPE "loweratomic"

 static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
-  IRBuilder<> Builder(CXI->getParent(), CXI);
+  IRBuilder<> Builder(CXI);
   Value *Ptr = CXI->getPointerOperand();
   Value *Cmp = CXI->getCompareOperand();
   Value *Val = CXI->getNewValOperand();
@@ -41,7 +41,7 @@ static bool LowerAtomicCmpXchgInst(AtomicCmpXchgInst *CXI) {
 }

 static bool LowerAtomicRMWInst(AtomicRMWInst *RMWI) {
-  IRBuilder<> Builder(RMWI->getParent(), RMWI);
+  IRBuilder<> Builder(RMWI);
   Value *Ptr = RMWI->getPointerOperand();
   Value *Val = RMWI->getValOperand();
@@ -120,7 +120,7 @@ namespace {
         return false;
       bool Changed = false;
       for (BasicBlock::iterator DI = BB.begin(), DE = BB.end(); DI != DE; ) {
-        Instruction *Inst = DI++;
+        Instruction *Inst = &*DI++;
         if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
           Changed |= LowerFenceInst(FI);
         else if (AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(Inst))
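The `IRBuilder` hunks here (and in Scalarizer below) drop a redundancy
rather than add a conversion: the single-argument `IRBuilder<>(Instruction *)`
constructor already positions the builder immediately before that
instruction, so the two-argument `(I->getParent(), I)` form buys nothing.
Sketch (hypothetical helper):

    Value *insertAddBefore(Instruction *I, Value *X, Value *Y) {
      IRBuilder<> Builder(I);         // same as IRBuilder<>(I->getParent(), I)
      return Builder.CreateAdd(X, Y); // emitted immediately before I
    }
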
diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
index 64f8aafcda0..f80b07bf219 100644
--- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp
+++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp
@@ -379,7 +379,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
   // are stored.
   MemsetRanges Ranges(DL);

-  BasicBlock::iterator BI = StartInst;
+  BasicBlock::iterator BI(StartInst);
   for (++BI; !isa<TerminatorInst>(BI); ++BI) {
     if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
       // If the instruction is readnone, ignore it, otherwise bail out.  We
@@ -434,7 +434,7 @@ Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
   // If we create any memsets, we put it right before the first instruction that
   // isn't part of the memset block.  This ensures that the memset is dominated
   // by any addressing instruction needed by the start of the block.
-  IRBuilder<> Builder(BI);
+  IRBuilder<> Builder(&*BI);

   // Now that we have full information about ranges, loop over the ranges and
   // emit memset's for anything big enough to be worthwhile.
@@ -516,8 +516,8 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
     // the call and the store.
     AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
     MemoryLocation StoreLoc = MemoryLocation::get(SI);
-    for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
-         E = C; I != E; --I) {
+    for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
+         I != E; --I) {
       if (AA.getModRefInfo(&*I, StoreLoc) != MRI_NoModRef) {
         C = nullptr;
         break;
@@ -559,7 +559,7 @@ bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
   if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
     if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                               ByteVal)) {
-      BBI = I;  // Don't invalidate iterator.
+      BBI = I->getIterator(); // Don't invalidate iterator.
       return true;
     }
@@ -572,7 +572,7 @@ bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
   if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
     if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                               MSI->getValue())) {
-      BBI = I;  // Don't invalidate iterator.
+      BBI = I->getIterator(); // Don't invalidate iterator.
       return true;
     }
   return false;
@@ -803,8 +803,9 @@ bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep) {
   //
   // NOTE: This is conservative, it will stop on any read from the source loc,
   // not just the defining memcpy.
-  MemDepResult SourceDep = MD->getPointerDependencyFrom(
-      MemoryLocation::getForSource(MDep), false, M, M->getParent());
+  MemDepResult SourceDep =
+      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
+                                   M->getIterator(), M->getParent());
   if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
     return false;
@@ -861,8 +862,9 @@ bool MemCpyOpt::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
     return false;

   // Check that there are no other dependencies on the memset destination.
-  MemDepResult DstDepInfo = MD->getPointerDependencyFrom(
-      MemoryLocation::getForDest(MemSet), false, MemCpy, MemCpy->getParent());
+  MemDepResult DstDepInfo =
+      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
+                                   MemCpy->getIterator(), MemCpy->getParent());
   if (DstDepInfo.getInst() != MemSet)
     return false;
@@ -999,8 +1001,8 @@ bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
   }

   MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
-  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(SrcLoc, true,
-                                                         M, M->getParent());
+  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
+      SrcLoc, true, M->getIterator(), M->getParent());

   if (SrcDepInfo.isClobber()) {
     if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
@@ -1077,8 +1079,8 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
   Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
   uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
   MemDepResult DepInfo = MD->getPointerDependencyFrom(
-      MemoryLocation(ByValArg, ByValSize), true, CS.getInstruction(),
-      CS.getInstruction()->getParent());
+      MemoryLocation(ByValArg, ByValSize), true,
+      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
   if (!DepInfo.isClobber())
     return false;
@@ -1120,9 +1122,9 @@ bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
   //
   // NOTE: This is conservative, it will stop on any read from the source loc,
   // not just the defining memcpy.
-  MemDepResult SourceDep =
-      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
-                                   CS.getInstruction(), MDep->getParent());
+  MemDepResult SourceDep = MD->getPointerDependencyFrom(
+      MemoryLocation::getForSource(MDep), false,
+      CS.getInstruction()->getIterator(), MDep->getParent());
   if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
     return false;
@@ -1149,7 +1151,7 @@ bool MemCpyOpt::iterateOnFunction(Function &F) {
   for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
     for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
       // Avoid invalidating the iterator.
-      Instruction *I = BI++;
+      Instruction *I = &*BI++;

       bool RepeatInstruction = false;
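Two MemCpyOpt details generalize: MemoryDependenceAnalysis'
`getPointerDependencyFrom` now takes the scan start as a
`BasicBlock::iterator`, and `processStore` spells a backward walk as
explicit `--I` from `--SI->getIterator()`. A hedged sketch of the query side
(the wrapper and `Loc` are assumptions; the signature is the one used above):

    // Find the closest dependency strictly above I in its block.
    MemDepResult queryAbove(MemoryDependenceAnalysis &MD,
                            const MemoryLocation &Loc, Instruction *I) {
      return MD.getPointerDependencyFrom(Loc, /*isLoad=*/false,
                                         I->getIterator(), I->getParent());
    }
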
diff --git a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
index e4ecf678dff..4161d6f7dd7 100644
--- a/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
+++ b/lib/Transforms/Scalar/MergedLoadStoreMotion.cpp
@@ -258,7 +258,7 @@ LoadInst *MergedLoadStoreMotion::canHoistFromBlock(BasicBlock *BB1,

   for (BasicBlock::iterator BBI = BB1->begin(), BBE = BB1->end(); BBI != BBE;
        ++BBI) {
-    Instruction *Inst = BBI;
+    Instruction *Inst = &*BBI;

     // Only merge and hoist loads when their result is used only in BB
     if (!isa<LoadInst>(Inst) || Inst->isUsedOutsideOfBlock(BB1))
@@ -365,8 +365,7 @@ bool MergedLoadStoreMotion::mergeLoads(BasicBlock *BB) {
   int NLoads = 0;
   for (BasicBlock::iterator BBI = Succ0->begin(), BBE = Succ0->end();
        BBI != BBE;) {
-
-    Instruction *I = BBI;
+    Instruction *I = &*BBI;
     ++BBI;

     // Only move non-simple (atomic, volatile) loads.
@@ -444,7 +443,7 @@ PHINode *MergedLoadStoreMotion::getPHIOperand(BasicBlock *BB, StoreInst *S0,
   Value *Opd2 = S1->getValueOperand();
   if (Opd1 != Opd2) {
     NewPN = PHINode::Create(Opd1->getType(), 2, Opd2->getName() + ".sink",
-                            BB->begin());
+                            &BB->front());
     NewPN->addIncoming(Opd1, S0->getParent());
     NewPN->addIncoming(Opd2, S1->getParent());
     if (MD && NewPN->getType()->getScalarType()->isPointerTy())
@@ -478,7 +477,7 @@ bool MergedLoadStoreMotion::sinkStore(BasicBlock *BB, StoreInst *S0,
     // Create the new store to be inserted at the join point.
     StoreInst *SNew = (StoreInst *)(S0->clone());
     Instruction *ANew = A0->clone();
-    SNew->insertBefore(InsertPt);
+    SNew->insertBefore(&*InsertPt);
     ANew->insertBefore(SNew);

     assert(S0->getParent() == A0->getParent());
@@ -574,7 +573,7 @@ bool MergedLoadStoreMotion::runOnFunction(Function &F) {
   // Merge unconditional branches, allowing PRE to catch more
   // optimization opportunities.
   for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE;) {
-    BasicBlock *BB = FI++;
+    BasicBlock *BB = &*FI++;

     // Hoist equivalent loads and sink stores
     // outside diamonds when possible
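`getPHIOperand` above shows the PHI-specific convention: new PHIs must go at
the very top of a block, and with the implicit conversion gone that position
is spelled `&BB->front()` rather than `BB->begin()` (the same spelling the
patch uses in LICM and LoopIdiomRecognize). Sketch, assuming a non-empty
block and a made-up helper name:

    PHINode *createSinkPHI(BasicBlock *BB, Value *A, BasicBlock *FromA,
                           Value *B, BasicBlock *FromB) {
      PHINode *PN = PHINode::Create(A->getType(), /*NumReservedValues=*/2,
                                    "sink.phi", &BB->front()); // top of block
      PN->addIncoming(A, FromA);
      PN->addIncoming(B, FromB);
      return PN;
    }
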
diff --git a/lib/Transforms/Scalar/NaryReassociate.cpp b/lib/Transforms/Scalar/NaryReassociate.cpp
index 972bd335c85..930552d2f90 100644
--- a/lib/Transforms/Scalar/NaryReassociate.cpp
+++ b/lib/Transforms/Scalar/NaryReassociate.cpp
@@ -246,21 +246,21 @@ bool NaryReassociate::doOneIteration(Function &F) {
        Node != GraphTraits<DominatorTree *>::nodes_end(DT); ++Node) {
     BasicBlock *BB = Node->getBlock();
     for (auto I = BB->begin(); I != BB->end(); ++I) {
-      if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(I)) {
-        const SCEV *OldSCEV = SE->getSCEV(I);
-        if (Instruction *NewI = tryReassociate(I)) {
+      if (SE->isSCEVable(I->getType()) && isPotentiallyNaryReassociable(&*I)) {
+        const SCEV *OldSCEV = SE->getSCEV(&*I);
+        if (Instruction *NewI = tryReassociate(&*I)) {
           Changed = true;
-          SE->forgetValue(I);
+          SE->forgetValue(&*I);
           I->replaceAllUsesWith(NewI);
           // If SeenExprs contains I's WeakVH, that entry will be replaced with
           // nullptr.
-          RecursivelyDeleteTriviallyDeadInstructions(I, TLI);
-          I = NewI;
+          RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
+          I = NewI->getIterator();
         }
         // Add the rewritten instruction to SeenExprs; the original instruction
         // is deleted.
-        const SCEV *NewSCEV = SE->getSCEV(I);
-        SeenExprs[NewSCEV].push_back(WeakVH(I));
+        const SCEV *NewSCEV = SE->getSCEV(&*I);
+        SeenExprs[NewSCEV].push_back(WeakVH(&*I));
         // Ideally, NewSCEV should equal OldSCEV because tryReassociate(I)
         // is equivalent to I. However, ScalarEvolution::getSCEV may
         // weaken nsw causing NewSCEV not to equal OldSCEV. For example, suppose
@@ -280,7 +280,7 @@ bool NaryReassociate::doOneIteration(Function &F) {
         //
         // This improvement is exercised in @reassociate_gep_nsw in nary-gep.ll.
         if (NewSCEV != OldSCEV)
-          SeenExprs[OldSCEV].push_back(WeakVH(I));
+          SeenExprs[OldSCEV].push_back(WeakVH(&*I));
       }
     }
   }
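`doOneIteration` also shows how to keep iterating after replacing the
current instruction: re-seat the iterator on the replacement with
`getIterator()` so the surrounding `++I` resumes from there. Reduced to its
skeleton (`tryRewrite` is a stand-in for `tryReassociate`, and the
replacement must already be linked into the block, as it is in the pass):

    for (auto I = BB->begin(); I != BB->end(); ++I) {
      if (Instruction *NewI = tryRewrite(&*I)) { // NewI already inserted in BB
        I->replaceAllUsesWith(NewI);
        RecursivelyDeleteTriviallyDeadInstructions(&*I, TLI);
        I = NewI->getIterator();                 // resume from the replacement
      }
    }
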
We will attach @@ -1435,7 +1435,7 @@ makeStatepointExplicitImpl(const CallSite CS, /* to replace */ NormalDest->getUniquePredecessor() && "can't safely insert in this block!"); - Builder.SetInsertPoint(NormalDest->getFirstInsertionPt()); + Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt()); // gc relocates will be generated later as if it were regular call // statepoint @@ -1708,8 +1708,8 @@ static void relocationViaAlloca( // Insert the clobbering stores. These may get intermixed with the // gc.results and gc.relocates, but that's fine. if (auto II = dyn_cast(Statepoint)) { - InsertClobbersAt(II->getNormalDest()->getFirstInsertionPt()); - InsertClobbersAt(II->getUnwindDest()->getFirstInsertionPt()); + InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt()); + InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt()); } else { InsertClobbersAt(cast(Statepoint)->getNextNode()); } @@ -1819,18 +1819,17 @@ static void insertUseHolderAfter(CallSite &CS, const ArrayRef Values, "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true))); if (CS.isCall()) { // For call safepoints insert dummy calls right after safepoint - BasicBlock::iterator Next(CS.getInstruction()); - Next++; - Holders.push_back(CallInst::Create(Func, Values, "", Next)); + Holders.push_back(CallInst::Create(Func, Values, "", + &*++CS.getInstruction()->getIterator())); return; } // For invoke safepooints insert dummy calls both in normal and // exceptional destination blocks auto *II = cast(CS.getInstruction()); Holders.push_back(CallInst::Create( - Func, Values, "", II->getNormalDest()->getFirstInsertionPt())); + Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt())); Holders.push_back(CallInst::Create( - Func, Values, "", II->getUnwindDest()->getFirstInsertionPt())); + Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt())); } static void findLiveReferences( @@ -2153,9 +2152,9 @@ static void rematerializeLiveValues(CallSite CS, InvokeInst *Invoke = cast(CS.getInstruction()); Instruction *NormalInsertBefore = - Invoke->getNormalDest()->getFirstInsertionPt(); + &*Invoke->getNormalDest()->getFirstInsertionPt(); Instruction *UnwindInsertBefore = - Invoke->getUnwindDest()->getFirstInsertionPt(); + &*Invoke->getUnwindDest()->getFirstInsertionPt(); Instruction *NormalRematerializedValue = rematerializeChain(NormalInsertBefore); @@ -2724,7 +2723,7 @@ static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data, // call result is not live (normal), nor are it's arguments // (unless they're used again later). This adjustment is // specifically what we need to relocate - BasicBlock::reverse_iterator rend(Inst); + BasicBlock::reverse_iterator rend(Inst->getIterator()); computeLiveInValues(BB->rbegin(), rend, LiveOut); LiveOut.erase(Inst); Out.insert(LiveOut.begin(), LiveOut.end()); diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp index a35d2e8df4d..4362030b231 100644 --- a/lib/Transforms/Scalar/SCCP.cpp +++ b/lib/Transforms/Scalar/SCCP.cpp @@ -1132,7 +1132,7 @@ CallOverdefined: // entry block executable and merge in the actual arguments to the call into // the formal arguments of the function. if (!TrackingIncomingArguments.empty() && TrackingIncomingArguments.count(F)){ - MarkBlockExecutable(F->begin()); + MarkBlockExecutable(&F->front()); // Propagate information from this call site into the callee. 
diff --git a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
index 8107d0bd18f..f5797164355 100644
--- a/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
+++ b/lib/Transforms/Scalar/RewriteStatepointsForGC.cpp
@@ -1415,7 +1415,7 @@ makeStatepointExplicitImpl(const CallSite CS, /* to replace */
            UnwindBlock->getUniquePredecessor() &&
            "can't safely insert in this block!");

-    Builder.SetInsertPoint(UnwindBlock->getFirstInsertionPt());
+    Builder.SetInsertPoint(&*UnwindBlock->getFirstInsertionPt());
     Builder.SetCurrentDebugLocation(ToReplace->getDebugLoc());

     // Extract second element from landingpad return value.  We will attach
@@ -1435,7 +1435,7 @@ makeStatepointExplicitImpl(const CallSite CS, /* to replace */
            NormalDest->getUniquePredecessor() &&
            "can't safely insert in this block!");

-    Builder.SetInsertPoint(NormalDest->getFirstInsertionPt());
+    Builder.SetInsertPoint(&*NormalDest->getFirstInsertionPt());

     // gc relocates will be generated later as if it were regular call
     // statepoint
@@ -1708,8 +1708,8 @@ static void relocationViaAlloca(
   // Insert the clobbering stores.  These may get intermixed with the
   // gc.results and gc.relocates, but that's fine.
   if (auto II = dyn_cast<InvokeInst>(Statepoint)) {
-    InsertClobbersAt(II->getNormalDest()->getFirstInsertionPt());
-    InsertClobbersAt(II->getUnwindDest()->getFirstInsertionPt());
+    InsertClobbersAt(&*II->getNormalDest()->getFirstInsertionPt());
+    InsertClobbersAt(&*II->getUnwindDest()->getFirstInsertionPt());
   } else {
     InsertClobbersAt(cast<Instruction>(Statepoint)->getNextNode());
   }
@@ -1819,18 +1819,17 @@ static void insertUseHolderAfter(CallSite &CS, const ArrayRef<Value *> Values,
       "__tmp_use", FunctionType::get(Type::getVoidTy(M->getContext()), true)));
   if (CS.isCall()) {
     // For call safepoints insert dummy calls right after safepoint
-    BasicBlock::iterator Next(CS.getInstruction());
-    Next++;
-    Holders.push_back(CallInst::Create(Func, Values, "", Next));
+    Holders.push_back(CallInst::Create(Func, Values, "",
+                                       &*++CS.getInstruction()->getIterator()));
     return;
   }
   // For invoke safepoints insert dummy calls both in normal and
   // exceptional destination blocks
   auto *II = cast<InvokeInst>(CS.getInstruction());
   Holders.push_back(CallInst::Create(
-      Func, Values, "", II->getNormalDest()->getFirstInsertionPt()));
+      Func, Values, "", &*II->getNormalDest()->getFirstInsertionPt()));
   Holders.push_back(CallInst::Create(
-      Func, Values, "", II->getUnwindDest()->getFirstInsertionPt()));
+      Func, Values, "", &*II->getUnwindDest()->getFirstInsertionPt()));
 }

 static void findLiveReferences(
@@ -2153,9 +2152,9 @@ static void rematerializeLiveValues(CallSite CS,
       InvokeInst *Invoke = cast<InvokeInst>(CS.getInstruction());

       Instruction *NormalInsertBefore =
-          Invoke->getNormalDest()->getFirstInsertionPt();
+          &*Invoke->getNormalDest()->getFirstInsertionPt();
       Instruction *UnwindInsertBefore =
-          Invoke->getUnwindDest()->getFirstInsertionPt();
+          &*Invoke->getUnwindDest()->getFirstInsertionPt();

       Instruction *NormalRematerializedValue =
           rematerializeChain(NormalInsertBefore);
@@ -2724,7 +2723,7 @@ static void findLiveSetAtInst(Instruction *Inst, GCPtrLivenessData &Data,
   // call result is not live (normal), nor are its arguments
   // (unless they're used again later).  This adjustment is
   // specifically what we need to relocate
-  BasicBlock::reverse_iterator rend(Inst);
+  BasicBlock::reverse_iterator rend(Inst->getIterator());
   computeLiveInValues(BB->rbegin(), rend, LiveOut);
   LiveOut.erase(Inst);
   Out.insert(LiveOut.begin(), LiveOut.end());
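`getFirstInsertionPt()` returns an iterator as well now (it skips PHIs and,
unlike `front()`, lands after a landingpad), so every use that feeds a
pointer-taking API such as `SetInsertPoint(Instruction *)` or
`CallInst::Create` grows a `&*`, as throughout the statepoint rewriting
above. Two small sketches of the resulting spellings:

    void emitAtBlockTop(IRBuilder<> &Builder, BasicBlock *BB) {
      Builder.SetInsertPoint(&*BB->getFirstInsertionPt()); // first legal slot
    }

    // Reverse iteration from an instruction goes through getIterator() too:
    BasicBlock::reverse_iterator reverseFrom(Instruction *Inst) {
      return BasicBlock::reverse_iterator(Inst->getIterator());
    }
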
diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp
index a35d2e8df4d..4362030b231 100644
--- a/lib/Transforms/Scalar/SCCP.cpp
+++ b/lib/Transforms/Scalar/SCCP.cpp
@@ -1132,7 +1132,7 @@ CallOverdefined:
   // entry block executable and merge in the actual arguments to the call into
   // the formal arguments of the function.
   if (!TrackingIncomingArguments.empty() && TrackingIncomingArguments.count(F)){
-    MarkBlockExecutable(F->begin());
+    MarkBlockExecutable(&F->front());

     // Propagate information from this call site into the callee.
     CallSite::arg_iterator CAI = CS.arg_begin();
@@ -1141,17 +1141,17 @@ CallOverdefined:
       // If this argument is byval, and if the function is not readonly, there
       // will be an implicit copy formed of the input aggregate.
       if (AI->hasByValAttr() && !F->onlyReadsMemory()) {
-        markOverdefined(AI);
+        markOverdefined(&*AI);
         continue;
       }

       if (StructType *STy = dyn_cast<StructType>(AI->getType())) {
         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
           LatticeVal CallArg = getStructValueState(*CAI, i);
-          mergeInValue(getStructValueState(AI, i), AI, CallArg);
+          mergeInValue(getStructValueState(&*AI, i), &*AI, CallArg);
         }
       } else {
-        mergeInValue(AI, getValueState(*CAI));
+        mergeInValue(&*AI, getValueState(*CAI));
       }
     }
   }
@@ -1252,18 +1252,18 @@ void SCCPSolver::Solve() {
 /// even if X isn't defined.
 bool SCCPSolver::ResolvedUndefsIn(Function &F) {
   for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
-    if (!BBExecutable.count(BB))
+    if (!BBExecutable.count(&*BB))
       continue;

-    for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
+    for (Instruction &I : *BB) {
       // Look for instructions which produce undef values.
-      if (I->getType()->isVoidTy()) continue;
+      if (I.getType()->isVoidTy()) continue;

-      if (StructType *STy = dyn_cast<StructType>(I->getType())) {
+      if (StructType *STy = dyn_cast<StructType>(I.getType())) {
         // Only a few things that can be structs matter for undef.

         // Tracked calls must never be marked overdefined in ResolvedUndefsIn.
-        if (CallSite CS = CallSite(I))
+        if (CallSite CS = CallSite(&I))
           if (Function *F = CS.getCalledFunction())
             if (MRVFunctionsTracked.count(F))
               continue;
@@ -1276,14 +1276,14 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
         // Send the results of everything else to overdefined.  We could be
         // more precise than this but it isn't worth bothering.
         for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
-          LatticeVal &LV = getStructValueState(I, i);
+          LatticeVal &LV = getStructValueState(&I, i);
           if (LV.isUndefined())
-            markOverdefined(LV, I);
+            markOverdefined(LV, &I);
         }
         continue;
       }

-      LatticeVal &LV = getValueState(I);
+      LatticeVal &LV = getValueState(&I);
       if (!LV.isUndefined()) continue;

       // extractvalue is safe; check here because the argument is a struct.
@@ -1293,24 +1293,24 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
       // Compute the operand LatticeVals, for convenience below.
       // Anything taking a struct is conservatively assumed to require
       // overdefined markings.
-      if (I->getOperand(0)->getType()->isStructTy()) {
-        markOverdefined(I);
+      if (I.getOperand(0)->getType()->isStructTy()) {
+        markOverdefined(&I);
         return true;
       }
-      LatticeVal Op0LV = getValueState(I->getOperand(0));
+      LatticeVal Op0LV = getValueState(I.getOperand(0));
       LatticeVal Op1LV;
-      if (I->getNumOperands() == 2) {
-        if (I->getOperand(1)->getType()->isStructTy()) {
-          markOverdefined(I);
+      if (I.getNumOperands() == 2) {
+        if (I.getOperand(1)->getType()->isStructTy()) {
+          markOverdefined(&I);
           return true;
         }
-        Op1LV = getValueState(I->getOperand(1));
+        Op1LV = getValueState(I.getOperand(1));
       }
       // If this is an instruction whose result is defined even if the input is
       // not fully defined, propagate the information.
-      Type *ITy = I->getType();
-      switch (I->getOpcode()) {
+      Type *ITy = I.getType();
+      switch (I.getOpcode()) {
       case Instruction::Add:
       case Instruction::Sub:
       case Instruction::Trunc:
@@ -1324,9 +1324,9 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
       case Instruction::FRem:
         // Floating-point binary operation: be conservative.
         if (Op0LV.isUndefined() && Op1LV.isUndefined())
-          markForcedConstant(I, Constant::getNullValue(ITy));
+          markForcedConstant(&I, Constant::getNullValue(ITy));
         else
-          markOverdefined(I);
+          markOverdefined(&I);
         return true;
       case Instruction::ZExt:
       case Instruction::SExt:
@@ -1338,7 +1338,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
       case Instruction::SIToFP:
       case Instruction::UIToFP:
         // undef -> 0; some outputs are impossible
-        markForcedConstant(I, Constant::getNullValue(ITy));
+        markForcedConstant(&I, Constant::getNullValue(ITy));
         return true;
       case Instruction::Mul:
       case Instruction::And:
@@ -1347,7 +1347,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
          break;
         // undef * X -> 0.   X could be zero.
         // undef & X -> 0.   X could be zero.
-        markForcedConstant(I, Constant::getNullValue(ITy));
+        markForcedConstant(&I, Constant::getNullValue(ITy));
         return true;

       case Instruction::Or:
@@ -1355,7 +1355,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
         if (Op0LV.isUndefined() && Op1LV.isUndefined())
           break;
         // undef | X -> -1.   X could be -1.
-        markForcedConstant(I, Constant::getAllOnesValue(ITy));
+        markForcedConstant(&I, Constant::getAllOnesValue(ITy));
         return true;

       case Instruction::Xor:
@@ -1363,7 +1363,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
         // necessary, but we try to be nice to people who expect this
         // behavior in simple cases
         if (Op0LV.isUndefined() && Op1LV.isUndefined()) {
-          markForcedConstant(I, Constant::getNullValue(ITy));
+          markForcedConstant(&I, Constant::getNullValue(ITy));
           return true;
         }
         // undef ^ X -> undef
@@ -1379,7 +1379,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {

         // undef / X -> 0.   X could be maxint.
         // undef % X -> 0.   X could be 1.
-        markForcedConstant(I, Constant::getNullValue(ITy));
+        markForcedConstant(&I, Constant::getNullValue(ITy));
         return true;

       case Instruction::AShr:
@@ -1387,7 +1387,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
         if (Op1LV.isUndefined())
           break;
         // undef >>a X -> all ones
-        markForcedConstant(I, Constant::getAllOnesValue(ITy));
+        markForcedConstant(&I, Constant::getAllOnesValue(ITy));
         return true;
       case Instruction::LShr:
       case Instruction::Shl:
@@ -1397,17 +1397,17 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {

         // undef << X -> 0
         // undef >> X -> 0
-        markForcedConstant(I, Constant::getNullValue(ITy));
+        markForcedConstant(&I, Constant::getNullValue(ITy));
         return true;
       case Instruction::Select:
-        Op1LV = getValueState(I->getOperand(1));
+        Op1LV = getValueState(I.getOperand(1));
         // undef ? X : Y  -> X or Y.  There could be commonality between X/Y.
         if (Op0LV.isUndefined()) {
           if (!Op1LV.isConstant())  // Pick the constant one if there is any.
-            Op1LV = getValueState(I->getOperand(2));
+            Op1LV = getValueState(I.getOperand(2));
         } else if (Op1LV.isUndefined()) {
           // c ? undef : undef -> undef.  No change.
-          Op1LV = getValueState(I->getOperand(2));
+          Op1LV = getValueState(I.getOperand(2));
           if (Op1LV.isUndefined())
             break;
           // Otherwise, c ? undef : x -> x.
         }

         if (Op1LV.isConstant())
-          markForcedConstant(I, Op1LV.getConstant());
+          markForcedConstant(&I, Op1LV.getConstant());
         else
-          markOverdefined(I);
+          markOverdefined(&I);
         return true;
       case Instruction::Load:
         // A load here means one of two things: a load of undef from a global,
         // a load from an unknown pointer.  Either way, having it be undef
         // is okay.
         break;
       case Instruction::ICmp:
         // X == undef -> undef.  Other comparisons get more complicated.
-        if (cast<ICmpInst>(I)->isEquality())
+        if (cast<ICmpInst>(&I)->isEquality())
           break;
-        markOverdefined(I);
+        markOverdefined(&I);
         return true;
       case Instruction::Call:
       case Instruction::Invoke: {
         // There are two reasons a call can have an undef result
         // 1. It could be tracked.
         // 2. It could be constant-foldable.
         // Because of the way we solve return values, tracked calls must
         // never be marked overdefined in ResolvedUndefsIn.
-        if (Function *F = CallSite(I).getCalledFunction())
+        if (Function *F = CallSite(&I).getCalledFunction())
           if (TrackedRetVals.count(F))
             break;

         // If the call is constant-foldable, we mark it overdefined because
         // we do not know what return values are valid.
-        markOverdefined(I);
+        markOverdefined(&I);
         return true;
       }
       default:
         // If we don't know what should happen here, conservatively mark it
         // overdefined.
-        markOverdefined(I);
+        markOverdefined(&I);
         return true;
       }
     }
@@ -1468,7 +1468,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
     // false.
     if (isa<UndefValue>(BI->getCondition())) {
       BI->setCondition(ConstantInt::getFalse(BI->getContext()));
-      markEdgeExecutable(BB, TI->getSuccessor(1));
+      markEdgeExecutable(&*BB, TI->getSuccessor(1));
       return true;
     }
@@ -1490,7 +1490,7 @@ bool SCCPSolver::ResolvedUndefsIn(Function &F) {
     // the first constant.
     if (isa<UndefValue>(SI->getCondition())) {
       SI->setCondition(SI->case_begin().getCaseValue());
-      markEdgeExecutable(BB, SI->case_begin().getCaseSuccessor());
+      markEdgeExecutable(&*BB, SI->case_begin().getCaseSuccessor());
       return true;
     }
@@ -1548,8 +1548,7 @@ static void DeleteInstructionInBlock(BasicBlock *BB) {
   Instruction *EndInst = BB->getTerminator(); // Last not to be deleted.
   while (EndInst != BB->begin()) {
     // Delete the next to last instruction.
-    BasicBlock::iterator I = EndInst;
-    Instruction *Inst = --I;
+    Instruction *Inst = &*--EndInst->getIterator();
     if (!Inst->use_empty())
       Inst->replaceAllUsesWith(UndefValue::get(Inst->getType()));
     if (Inst->isEHPad()) {
@@ -1575,11 +1574,11 @@ bool SCCP::runOnFunction(Function &F) {
   SCCPSolver Solver(DL, TLI);

   // Mark the first block of the function as being executable.
-  Solver.MarkBlockExecutable(F.begin());
+  Solver.MarkBlockExecutable(&F.front());

   // Mark all arguments to the function as being overdefined.
-  for (Function::arg_iterator AI = F.arg_begin(), E = F.arg_end(); AI != E;++AI)
-    Solver.markAnythingOverdefined(AI);
+  for (Argument &AI : F.args())
+    Solver.markAnythingOverdefined(&AI);

   // Solve for constants.
   bool ResolvedUndefs = true;
@@ -1596,8 +1595,8 @@ bool SCCP::runOnFunction(Function &F) {
   // as we cannot modify the CFG of the function.

   for (Function::iterator BB = F.begin(), E = F.end(); BB != E; ++BB) {
-    if (!Solver.isBlockExecutable(BB)) {
-      DeleteInstructionInBlock(BB);
+    if (!Solver.isBlockExecutable(&*BB)) {
+      DeleteInstructionInBlock(&*BB);
       MadeChanges = true;
       continue;
     }
@@ -1606,7 +1605,7 @@ bool SCCP::runOnFunction(Function &F) {
     // constants if we have found them to be of constant values.
     //
     for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
-      Instruction *Inst = BI++;
+      Instruction *Inst = &*BI++;
       if (Inst->getType()->isVoidTy() || isa<TerminatorInst>(Inst))
         continue;
@@ -1720,36 +1719,34 @@ bool IPSCCP::runOnModule(Module &M) {
     // If this is a strong or ODR definition of this function, then we can
    // propagate information about its result into callsites of it.
     if (!F->mayBeOverridden())
-      Solver.AddTrackedFunction(F);
+      Solver.AddTrackedFunction(&*F);

     // If this function only has direct calls that we can see, we can track its
     // arguments and return value aggressively, and can assume it is not called
     // unless we see evidence to the contrary.
     if (F->hasLocalLinkage()) {
-      if (AddressIsTaken(F))
-        AddressTakenFunctions.insert(F);
+      if (AddressIsTaken(&*F))
+        AddressTakenFunctions.insert(&*F);
       else {
-        Solver.AddArgumentTrackedFunction(F);
+        Solver.AddArgumentTrackedFunction(&*F);
         continue;
       }
     }

     // Assume the function is called.
-    Solver.MarkBlockExecutable(F->begin());
+    Solver.MarkBlockExecutable(&F->front());

     // Assume nothing about the incoming arguments.
-    for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
-         AI != E; ++AI)
-      Solver.markAnythingOverdefined(AI);
+    for (Argument &AI : F->args())
+      Solver.markAnythingOverdefined(&AI);
   }

   // Loop over global variables.  We inform the solver about any internal global
   // variables that do not have their 'addresses taken'.  If they don't have
   // their addresses taken, we can propagate constants through them.
-  for (Module::global_iterator G = M.global_begin(), E = M.global_end();
-       G != E; ++G)
-    if (!G->isConstant() && G->hasLocalLinkage() && !AddressIsTaken(G))
-      Solver.TrackValueOfGlobalVariable(G);
+  for (GlobalVariable &G : M.globals())
+    if (!G.isConstant() && G.hasLocalLinkage() && !AddressIsTaken(&G))
+      Solver.TrackValueOfGlobalVariable(&G);

   // Solve for constants.
   bool ResolvedUndefs = true;
@@ -1770,7 +1767,10 @@ bool IPSCCP::runOnModule(Module &M) {
   SmallVector<BasicBlock *, 512> BlocksToErase;

   for (Module::iterator F = M.begin(), E = M.end(); F != E; ++F) {
-    if (Solver.isBlockExecutable(F->begin())) {
+    if (F->isDeclaration())
+      continue;
+
+    if (Solver.isBlockExecutable(&F->front())) {
       for (Function::arg_iterator AI = F->arg_begin(), E = F->arg_end();
            AI != E; ++AI) {
         if (AI->use_empty() || AI->getType()->isStructTy()) continue;

         // TODO: Could use getStructLatticeValueFor to find out if the entire
         // result is a constant and replace it entirely if so.
-        LatticeVal IV = Solver.getLatticeValueFor(AI);
+        LatticeVal IV = Solver.getLatticeValueFor(&*AI);
         if (IV.isOverdefined()) continue;

         Constant *CST = IV.isConstant() ?
             IV.getConstant() : UndefValue::get(AI->getType());
@@ -1793,27 +1793,27 @@ bool IPSCCP::runOnModule(Module &M) {
     }

     for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
-      if (!Solver.isBlockExecutable(BB)) {
-        DeleteInstructionInBlock(BB);
+      if (!Solver.isBlockExecutable(&*BB)) {
+        DeleteInstructionInBlock(&*BB);
         MadeChanges = true;

         TerminatorInst *TI = BB->getTerminator();
         for (BasicBlock *Succ : TI->successors()) {
           if (!Succ->empty() && isa<PHINode>(Succ->begin()))
-            Succ->removePredecessor(BB);
+            Succ->removePredecessor(&*BB);
         }
         if (!TI->use_empty())
           TI->replaceAllUsesWith(UndefValue::get(TI->getType()));
         TI->eraseFromParent();
-        new UnreachableInst(M.getContext(), BB);
+        new UnreachableInst(M.getContext(), &*BB);

         if (&*BB != &F->front())
-          BlocksToErase.push_back(BB);
+          BlocksToErase.push_back(&*BB);
         continue;
       }

       for (BasicBlock::iterator BI = BB->begin(), E = BB->end(); BI != E; ) {
-        Instruction *Inst = BI++;
+        Instruction *Inst = &*BI++;
         if (Inst->getType()->isVoidTy() || Inst->getType()->isStructTy())
           continue;
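The hunk above is the behavioural change the commit message warns about. For
a declaration, `F->begin() == F->end()`, and the old code handed that end
iterator to `isBlockExecutable` as if it were a `BasicBlock*`, i.e. a
pointer at the ilist sentinel. With `&F->front()` the bad case would assert
instead, and the new `isDeclaration()` check keeps it from arising at all.
The guarded shape, reduced to a sketch (the patch itself keeps the explicit
`Module::iterator` loop):

    for (Function &F : M) {
      if (F.isDeclaration())
        continue;                            // no blocks: front() would assert
      if (Solver.isBlockExecutable(&F.front())) {
        // ... rewrite F's arguments and instructions ...
      }
    }
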
diff --git a/lib/Transforms/Scalar/SROA.cpp b/lib/Transforms/Scalar/SROA.cpp
index 465bbd1d31b..b0cbdc15752 100644
--- a/lib/Transforms/Scalar/SROA.cpp
+++ b/lib/Transforms/Scalar/SROA.cpp
@@ -1158,7 +1158,7 @@ static bool isSafePHIToSpeculate(PHINode &PN) {

   // Ensure that there are no instructions between the PHI and the load that
   // could store.
-  for (BasicBlock::iterator BBI = &PN; &*BBI != LI; ++BBI)
+  for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
     if (BBI->mayWriteToMemory())
       return false;
@@ -2442,7 +2442,7 @@ private:
              DL.getTypeStoreSizeInBits(LI.getType()) &&
              "Non-byte-multiple bit width");
       // Move the insertion point just past the load so that we can refer to it.
-      IRB.SetInsertPoint(std::next(BasicBlock::iterator(&LI)));
+      IRB.SetInsertPoint(&*std::next(BasicBlock::iterator(&LI)));
       // Create a placeholder value with the same type as LI to use as the
       // basis for the new value. This allows us to replace the uses of LI with
       // the computed value, and then replace the placeholder with LI, leaving
@@ -2920,7 +2920,7 @@ private:
     // dominate the PHI.
     IRBuilderTy PtrBuilder(IRB);
     if (isa<PHINode>(OldPtr))
-      PtrBuilder.SetInsertPoint(OldPtr->getParent()->getFirstInsertionPt());
+      PtrBuilder.SetInsertPoint(&*OldPtr->getParent()->getFirstInsertionPt());
     else
       PtrBuilder.SetInsertPoint(OldPtr);
     PtrBuilder.SetCurrentDebugLocation(OldPtr->getDebugLoc());
@@ -3566,7 +3566,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
            "Cannot represent alloca access size using 64-bit integers!");

     Instruction *BasePtr = cast<Instruction>(LI->getPointerOperand());
-    IRB.SetInsertPoint(BasicBlock::iterator(LI));
+    IRB.SetInsertPoint(LI);

     DEBUG(dbgs() << "  Splitting load: " << *LI << "\n");
@@ -3618,7 +3618,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
       }

       Value *StoreBasePtr = SI->getPointerOperand();
-      IRB.SetInsertPoint(BasicBlock::iterator(SI));
+      IRB.SetInsertPoint(SI);

       DEBUG(dbgs() << "    Splitting store of load: " << *SI << "\n");
@@ -3707,7 +3707,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
       if (SplitLoads) {
         PLoad = (*SplitLoads)[Idx];
       } else {
-        IRB.SetInsertPoint(BasicBlock::iterator(LI));
+        IRB.SetInsertPoint(LI);
         PLoad = IRB.CreateAlignedLoad(
             getAdjustedPtr(IRB, DL, LoadBasePtr,
                            APInt(DL.getPointerSizeInBits(), PartOffset),
@@ -3717,7 +3717,7 @@ bool SROA::presplitLoadsAndStores(AllocaInst &AI, AllocaSlices &AS) {
       }

       // And store this partition.
-      IRB.SetInsertPoint(BasicBlock::iterator(SI));
+      IRB.SetInsertPoint(SI);
       StoreInst *PStore = IRB.CreateAlignedStore(
           PLoad, getAdjustedPtr(IRB, DL, StoreBasePtr,
                                 APInt(DL.getPointerSizeInBits(), PartOffset),
diff --git a/lib/Transforms/Scalar/ScalarReplAggregates.cpp b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
index d955da7ce75..65cb0086753 100644
--- a/lib/Transforms/Scalar/ScalarReplAggregates.cpp
+++ b/lib/Transforms/Scalar/ScalarReplAggregates.cpp
@@ -382,8 +382,8 @@ AllocaInst *ConvertToScalarInfo::TryConvert(AllocaInst *AI) {
     // Create and insert the integer alloca.
     NewTy = IntegerType::get(AI->getContext(), BitWidth);
   }
-  AllocaInst *NewAI = new AllocaInst(NewTy, nullptr, "",
-                                     AI->getParent()->begin());
+  AllocaInst *NewAI =
+      new AllocaInst(NewTy, nullptr, "", &AI->getParent()->front());
   ConvertUsesToScalar(AI, NewAI, 0, nullptr);
   return NewAI;
 }
@@ -1195,7 +1195,7 @@ static bool isSafePHIToSpeculate(PHINode *PN) {

   // Ensure that there are no instructions between the PHI and the load that
   // could store.
-  for (BasicBlock::iterator BBI = PN; &*BBI != LI; ++BBI)
+  for (BasicBlock::iterator BBI(PN); &*BBI != LI; ++BBI)
     if (BBI->mayWriteToMemory())
       return false;
diff --git a/lib/Transforms/Scalar/Scalarizer.cpp b/lib/Transforms/Scalar/Scalarizer.cpp
index c99f13be6a9..054bacdc706 100644
--- a/lib/Transforms/Scalar/Scalarizer.cpp
+++ b/lib/Transforms/Scalar/Scalarizer.cpp
@@ -254,10 +254,9 @@ bool Scalarizer::doInitialization(Module &M) {
 bool Scalarizer::runOnFunction(Function &F) {
   assert(Gathered.empty() && Scattered.empty());
-  for (Function::iterator BBI = F.begin(), BBE = F.end(); BBI != BBE; ++BBI) {
-    BasicBlock *BB = BBI;
-    for (BasicBlock::iterator II = BB->begin(), IE = BB->end(); II != IE;) {
-      Instruction *I = II;
+  for (BasicBlock &BB : F) {
+    for (BasicBlock::iterator II = BB.begin(), IE = BB.end(); II != IE;) {
+      Instruction *I = &*II;
       bool Done = visit(I);
       ++II;
       if (Done && I->getType()->isVoidTy())
@@ -286,7 +285,7 @@ Scatterer Scalarizer::scatter(Instruction *Point, Value *V) {
   }
   // In the fallback case, just put the scattered before Point and
   // keep the result local to Point.
-  return Scatterer(Point->getParent(), Point, V);
+  return Scatterer(Point->getParent(), Point->getIterator(), V);
 }

 // Replace Op with the gathered form of the components in CV.  Defer the
@@ -378,7 +377,7 @@ bool Scalarizer::splitBinary(Instruction &I, const Splitter &Split) {
     return false;

   unsigned NumElems = VT->getNumElements();
-  IRBuilder<> Builder(I.getParent(), &I);
+  IRBuilder<> Builder(&I);
   Scatterer Op0 = scatter(&I, I.getOperand(0));
   Scatterer Op1 = scatter(&I, I.getOperand(1));
   assert(Op0.size() == NumElems && "Mismatched binary operation");
@@ -398,7 +397,7 @@ bool Scalarizer::visitSelectInst(SelectInst &SI) {
     return false;

   unsigned NumElems = VT->getNumElements();
-  IRBuilder<> Builder(SI.getParent(), &SI);
+  IRBuilder<> Builder(&SI);
   Scatterer Op1 = scatter(&SI, SI.getOperand(1));
   Scatterer Op2 = scatter(&SI, SI.getOperand(2));
   assert(Op1.size() == NumElems && "Mismatched select");
@@ -439,7 +438,7 @@ bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
   if (!VT)
     return false;

-  IRBuilder<> Builder(GEPI.getParent(), &GEPI);
+  IRBuilder<> Builder(&GEPI);
   unsigned NumElems = VT->getNumElements();
   unsigned NumIndices = GEPI.getNumIndices();
@@ -473,7 +472,7 @@ bool Scalarizer::visitCastInst(CastInst &CI) {
     return false;

   unsigned NumElems = VT->getNumElements();
-  IRBuilder<> Builder(CI.getParent(), &CI);
+  IRBuilder<> Builder(&CI);
   Scatterer Op0 = scatter(&CI, CI.getOperand(0));
   assert(Op0.size() == NumElems && "Mismatched cast");
   ValueVector Res;
@@ -493,7 +492,7 @@ bool Scalarizer::visitBitCastInst(BitCastInst &BCI) {
   unsigned DstNumElems = DstVT->getNumElements();
   unsigned SrcNumElems = SrcVT->getNumElements();
-  IRBuilder<> Builder(BCI.getParent(), &BCI);
+  IRBuilder<> Builder(&BCI);
   Scatterer Op0 = scatter(&BCI, BCI.getOperand(0));
   ValueVector Res;
   Res.resize(DstNumElems);
@@ -570,7 +569,7 @@ bool Scalarizer::visitPHINode(PHINode &PHI) {
     return false;

   unsigned NumElems = VT->getNumElements();
-  IRBuilder<> Builder(PHI.getParent(), &PHI);
+  IRBuilder<> Builder(&PHI);
   ValueVector Res;
   Res.resize(NumElems);
@@ -601,7 +600,7 @@ bool Scalarizer::visitLoadInst(LoadInst &LI) {
     return false;

   unsigned NumElems = Layout.VecTy->getNumElements();
-  IRBuilder<> Builder(LI.getParent(), &LI);
+  IRBuilder<> Builder(&LI);
   Scatterer Ptr = scatter(&LI, LI.getPointerOperand());
   ValueVector Res;
   Res.resize(NumElems);
@@ -626,7 +625,7 @@ bool Scalarizer::visitStoreInst(StoreInst &SI) {
     return false;

   unsigned NumElems = Layout.VecTy->getNumElements();
-  IRBuilder<> Builder(SI.getParent(), &SI);
+  IRBuilder<> Builder(&SI);
   Scatterer Ptr = scatter(&SI, SI.getPointerOperand());
   Scatterer Val = scatter(&SI, FullValue);
@@ -658,7 +657,7 @@ bool Scalarizer::finish() {
     Value *Res = UndefValue::get(Ty);
     BasicBlock *BB = Op->getParent();
     unsigned Count = Ty->getVectorNumElements();
-    IRBuilder<> Builder(BB, Op);
+    IRBuilder<> Builder(Op);
     if (isa<PHINode>(Op))
       Builder.SetInsertPoint(BB, BB->getFirstInsertionPt());
     for (unsigned I = 0; I < Count; ++I)
Defer the @@ -378,7 +377,7 @@ bool Scalarizer::splitBinary(Instruction &I, const Splitter &Split) { return false; unsigned NumElems = VT->getNumElements(); - IRBuilder<> Builder(I.getParent(), &I); + IRBuilder<> Builder(&I); Scatterer Op0 = scatter(&I, I.getOperand(0)); Scatterer Op1 = scatter(&I, I.getOperand(1)); assert(Op0.size() == NumElems && "Mismatched binary operation"); @@ -398,7 +397,7 @@ bool Scalarizer::visitSelectInst(SelectInst &SI) { return false; unsigned NumElems = VT->getNumElements(); - IRBuilder<> Builder(SI.getParent(), &SI); + IRBuilder<> Builder(&SI); Scatterer Op1 = scatter(&SI, SI.getOperand(1)); Scatterer Op2 = scatter(&SI, SI.getOperand(2)); assert(Op1.size() == NumElems && "Mismatched select"); @@ -439,7 +438,7 @@ bool Scalarizer::visitGetElementPtrInst(GetElementPtrInst &GEPI) { if (!VT) return false; - IRBuilder<> Builder(GEPI.getParent(), &GEPI); + IRBuilder<> Builder(&GEPI); unsigned NumElems = VT->getNumElements(); unsigned NumIndices = GEPI.getNumIndices(); @@ -473,7 +472,7 @@ bool Scalarizer::visitCastInst(CastInst &CI) { return false; unsigned NumElems = VT->getNumElements(); - IRBuilder<> Builder(CI.getParent(), &CI); + IRBuilder<> Builder(&CI); Scatterer Op0 = scatter(&CI, CI.getOperand(0)); assert(Op0.size() == NumElems && "Mismatched cast"); ValueVector Res; @@ -493,7 +492,7 @@ bool Scalarizer::visitBitCastInst(BitCastInst &BCI) { unsigned DstNumElems = DstVT->getNumElements(); unsigned SrcNumElems = SrcVT->getNumElements(); - IRBuilder<> Builder(BCI.getParent(), &BCI); + IRBuilder<> Builder(&BCI); Scatterer Op0 = scatter(&BCI, BCI.getOperand(0)); ValueVector Res; Res.resize(DstNumElems); @@ -570,7 +569,7 @@ bool Scalarizer::visitPHINode(PHINode &PHI) { return false; unsigned NumElems = VT->getNumElements(); - IRBuilder<> Builder(PHI.getParent(), &PHI); + IRBuilder<> Builder(&PHI); ValueVector Res; Res.resize(NumElems); @@ -601,7 +600,7 @@ bool Scalarizer::visitLoadInst(LoadInst &LI) { return false; unsigned NumElems = Layout.VecTy->getNumElements(); - IRBuilder<> Builder(LI.getParent(), &LI); + IRBuilder<> Builder(&LI); Scatterer Ptr = scatter(&LI, LI.getPointerOperand()); ValueVector Res; Res.resize(NumElems); @@ -626,7 +625,7 @@ bool Scalarizer::visitStoreInst(StoreInst &SI) { return false; unsigned NumElems = Layout.VecTy->getNumElements(); - IRBuilder<> Builder(SI.getParent(), &SI); + IRBuilder<> Builder(&SI); Scatterer Ptr = scatter(&SI, SI.getPointerOperand()); Scatterer Val = scatter(&SI, FullValue); @@ -658,7 +657,7 @@ bool Scalarizer::finish() { Value *Res = UndefValue::get(Ty); BasicBlock *BB = Op->getParent(); unsigned Count = Ty->getVectorNumElements(); - IRBuilder<> Builder(BB, Op); + IRBuilder<> Builder(Op); if (isa(Op)) Builder.SetInsertPoint(BB, BB->getFirstInsertionPt()); for (unsigned I = 0; I < Count; ++I) diff --git a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp index 44ca2b78b38..d024bc05f38 100644 --- a/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp +++ b/lib/Transforms/Scalar/SeparateConstOffsetFromGEP.cpp @@ -1154,7 +1154,7 @@ bool SeparateConstOffsetFromGEP::reuniteExts(Function &F) { Node != GraphTraits::nodes_end(DT); ++Node) { BasicBlock *BB = Node->getBlock(); for (auto I = BB->begin(); I != BB->end(); ) { - Instruction *Cur = I++; + Instruction *Cur = &*I++; Changed |= reuniteExts(Cur); } } diff --git a/lib/Transforms/Scalar/SimplifyCFGPass.cpp b/lib/Transforms/Scalar/SimplifyCFGPass.cpp index c766fae56df..63c8836bf38 100644 --- 
a/lib/Transforms/Scalar/SimplifyCFGPass.cpp +++ b/lib/Transforms/Scalar/SimplifyCFGPass.cpp @@ -68,15 +68,14 @@ static bool mergeEmptyReturnBlocks(Function &F) { // single PHI node that is the operand to the return. if (Ret != &BB.front()) { // Check for something else in the block. - BasicBlock::iterator I = Ret; + BasicBlock::iterator I(Ret); --I; // Skip over debug info. while (isa(I) && I != BB.begin()) --I; if (!isa(I) && - (!isa(I) || I != BB.begin() || - Ret->getNumOperands() == 0 || - Ret->getOperand(0) != I)) + (!isa(I) || I != BB.begin() || Ret->getNumOperands() == 0 || + Ret->getOperand(0) != &*I)) continue; } @@ -137,7 +136,7 @@ static bool iterativelySimplifyCFG(Function &F, const TargetTransformInfo &TTI, // Loop over all of the basic blocks and remove them if they are unneeded. for (Function::iterator BBIt = F.begin(); BBIt != F.end(); ) { - if (SimplifyCFG(BBIt++, TTI, BonusInstThreshold, AC)) { + if (SimplifyCFG(&*BBIt++, TTI, BonusInstThreshold, AC)) { LocalChange = true; ++NumSimpl; } diff --git a/lib/Transforms/Scalar/Sink.cpp b/lib/Transforms/Scalar/Sink.cpp index 17a6858a0c0..8b916347999 100644 --- a/lib/Transforms/Scalar/Sink.cpp +++ b/lib/Transforms/Scalar/Sink.cpp @@ -134,7 +134,7 @@ bool Sinking::ProcessBlock(BasicBlock &BB) { bool ProcessedBegin = false; SmallPtrSet Stores; do { - Instruction *Inst = I; // The instruction to sink. + Instruction *Inst = &*I; // The instruction to sink. // Predecrement I (if it's not begin) so that it isn't invalidated by // sinking. @@ -279,6 +279,6 @@ bool Sinking::SinkInstruction(Instruction *Inst, dbgs() << ")\n"); // Move the instruction. - Inst->moveBefore(SuccToSinkTo->getFirstInsertionPt()); + Inst->moveBefore(&*SuccToSinkTo->getFirstInsertionPt()); return true; } diff --git a/lib/Transforms/Scalar/SpeculativeExecution.cpp b/lib/Transforms/Scalar/SpeculativeExecution.cpp index ff3f00a2e2f..147d615488f 100644 --- a/lib/Transforms/Scalar/SpeculativeExecution.cpp +++ b/lib/Transforms/Scalar/SpeculativeExecution.cpp @@ -227,7 +227,7 @@ bool SpeculativeExecution::considerHoistingFromTo(BasicBlock &FromBlock, // changes the list that I is iterating through. auto Current = I; ++I; - if (!NotHoisted.count(Current)) { + if (!NotHoisted.count(&*Current)) { Current->moveBefore(ToBlock.getTerminator()); } } diff --git a/lib/Transforms/Scalar/StructurizeCFG.cpp b/lib/Transforms/Scalar/StructurizeCFG.cpp index 28812a45719..662513c7d8a 100644 --- a/lib/Transforms/Scalar/StructurizeCFG.cpp +++ b/lib/Transforms/Scalar/StructurizeCFG.cpp @@ -899,14 +899,14 @@ void StructurizeCFG::rebuildSSA() { continue; } - if (DT->dominates(II, User)) + if (DT->dominates(&*II, User)) continue; if (!Initialized) { Value *Undef = UndefValue::get(II->getType()); Updater.Initialize(II->getType(), ""); Updater.AddAvailableValue(&Func->getEntryBlock(), Undef); - Updater.AddAvailableValue(BB, II); + Updater.AddAvailableValue(BB, &*II); Initialized = true; } Updater.RewriteUseAfterInsertions(U); diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp index 367fe8b61a7..1eddef7ef94 100644 --- a/lib/Transforms/Scalar/TailRecursionElimination.cpp +++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp @@ -406,7 +406,7 @@ bool TailCallElim::runTRE(Function &F) { // Until this is resolved, disable this transformation if that would ever // happen. This bug is PR962. 
   for (Function::iterator BBI = F.begin(), E = F.end(); BBI != E; /*in loop*/) {
-    BasicBlock *BB = BBI++; // FoldReturnAndProcessPred may delete BB.
+    BasicBlock *BB = &*BBI++; // FoldReturnAndProcessPred may delete BB.
     if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB->getTerminator())) {
       bool Change = ProcessReturningBlock(Ret, OldEntry, TailCallsAreMarkedTail,
                                           ArgumentPHIs, !CanTRETailMarkedCall);
@@ -576,7 +576,7 @@ TailCallElim::FindTRECandidate(Instruction *TI,
   // Scan backwards from the return, checking to see if there is a tail call in
   // this block.  If so, set CI to it.
   CallInst *CI = nullptr;
-  BasicBlock::iterator BBI = TI;
+  BasicBlock::iterator BBI(TI);
   while (true) {
     CI = dyn_cast<CallInst>(BBI);
     if (CI && CI->getCalledFunction() == F)
@@ -597,9 +597,8 @@ TailCallElim::FindTRECandidate(Instruction *TI,
   // and disable this xform in this case, because the code generator will
   // lower the call to fabs into inline code.
   if (BB == &F->getEntryBlock() &&
-      FirstNonDbg(BB->front()) == CI &&
-      FirstNonDbg(std::next(BB->begin())) == TI &&
-      CI->getCalledFunction() &&
+      FirstNonDbg(BB->front().getIterator()) == CI &&
+      FirstNonDbg(std::next(BB->begin())) == TI && CI->getCalledFunction() &&
       !TTI->isLoweredToCall(CI->getCalledFunction())) {
     // A single-block function with just a call and a return. Check that
     // the arguments match.
@@ -638,19 +637,19 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
   // tail call if all of the instructions between the call and the return are
   // movable to above the call itself, leaving the call next to the return.
   // Check that this is the case now.
-  BasicBlock::iterator BBI = CI;
+  BasicBlock::iterator BBI(CI);
   for (++BBI; &*BBI != Ret; ++BBI) {
-    if (CanMoveAboveCall(BBI, CI)) continue;
+    if (CanMoveAboveCall(&*BBI, CI)) continue;
 
     // If we can't move the instruction above the call, it might be because it
     // is an associative and commutative operation that could be transformed
     // using accumulator recursion elimination.  Check to see if this is the
     // case, and if so, remember the initial accumulator value for later.
     if ((AccumulatorRecursionEliminationInitVal =
-             CanTransformAccumulatorRecursion(BBI, CI))) {
+             CanTransformAccumulatorRecursion(&*BBI, CI))) {
       // Yes, this is accumulator recursion.  Remember which instruction
      // accumulates.
-      AccumulatorRecursionInstr = BBI;
+      AccumulatorRecursionInstr = &*BBI;
     } else {
       return false; // Otherwise, we cannot eliminate the tail recursion!
     }
@@ -700,19 +699,19 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
          NEBI = NewEntry->begin(); OEBI != E; )
       if (AllocaInst *AI = dyn_cast<AllocaInst>(OEBI++))
         if (isa<ConstantInt>(AI->getArraySize()))
-          AI->moveBefore(NEBI);
+          AI->moveBefore(&*NEBI);
 
     // Now that we have created a new block, which jumps to the entry
     // block, insert a PHI node for each argument of the function.
     // For now, we initialize each PHI to only have the real arguments
     // which are passed in.
-    Instruction *InsertPos = OldEntry->begin();
+    Instruction *InsertPos = &OldEntry->front();
     for (Function::arg_iterator I = F->arg_begin(),
          E = F->arg_end(); I != E; ++I) {
       PHINode *PN = PHINode::Create(I->getType(), 2,
                                     I->getName() + ".tr", InsertPos);
       I->replaceAllUsesWith(PN); // Everyone use the PHI node now!
-      PN->addIncoming(I, NewEntry);
+      PN->addIncoming(&*I, NewEntry);
       ArgumentPHIs.push_back(PN);
     }
   }
@@ -741,10 +740,9 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret,
     Instruction *AccRecInstr = AccumulatorRecursionInstr;
     // Start by inserting a new PHI node for the accumulator.
     pred_iterator PB = pred_begin(OldEntry), PE = pred_end(OldEntry);
-    PHINode *AccPN =
-      PHINode::Create(AccumulatorRecursionEliminationInitVal->getType(),
-                      std::distance(PB, PE) + 1,
-                      "accumulator.tr", OldEntry->begin());
+    PHINode *AccPN = PHINode::Create(
+        AccumulatorRecursionEliminationInitVal->getType(),
+        std::distance(PB, PE) + 1, "accumulator.tr", &OldEntry->front());
 
     // Loop over all of the predecessors of the tail recursion block.  For the
     // real entry into the function we seed the PHI with the initial value,
-- 
2.34.1
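
For reference, the idiom applied at every site above is the explicit two-way
conversion between an ilist iterator and the element pointer it designates. A
minimal sketch follows; it is not part of the patch, the helper names
(visitAll, positionAfter) are made up for illustration, and it assumes only
the in-tree BasicBlock/Instruction API at this revision, where ilist nodes
expose getIterator():

  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instruction.h"
  #include <iterator>

  using namespace llvm;

  // Iterator -> pointer: &* on a valid (non-end) iterator yields the element.
  // Taking the pointer and bumping the iterator in one statement keeps the
  // loop safe even if the body erases Inst from the block.
  static void visitAll(BasicBlock &BB) {
    for (BasicBlock::iterator I = BB.begin(), E = BB.end(); I != E;) {
      Instruction *Inst = &*I++; // explicit where the conversion was implicit
      (void)Inst;                // ... inspect, move, or erase Inst here ...
    }
  }

  // Pointer -> iterator: construct the iterator explicitly or call
  // getIterator(), e.g. to name the position just after an instruction.
  static BasicBlock::iterator positionAfter(Instruction *Inst) {
    return std::next(Inst->getIterator()); // == ++BasicBlock::iterator(Inst)
  }

The &*I++ spelling matters in loops like TailCallElim::runTRE above, where the
callee may delete the current block or instruction: the iterator has already
moved on before anything is erased, so nothing is left dangling.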