//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for
/// objects in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
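///
/// For example (an illustrative IR sketch, not taken from this file), in
///
///   %0 = call i8* @objc_retain(i8* %x)
///   call void @objc_release(i8* %x)
///
/// with nothing between the two calls that can affect the reference count,
/// the pair is redundant and both calls can be deleted.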
///
/// WARNING: This file knows about certain library functions. It recognizes
/// them by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions
/// are used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
28 #include "ARCRuntimeEntryPoints.h"
29 #include "DependencyAnalysis.h"
30 #include "ObjCARCAliasAnalysis.h"
31 #include "ProvenanceAnalysis.h"
32 #include "BlotMapVector.h"
34 #include "llvm/ADT/DenseMap.h"
35 #include "llvm/ADT/DenseSet.h"
36 #include "llvm/ADT/STLExtras.h"
37 #include "llvm/ADT/SmallPtrSet.h"
38 #include "llvm/ADT/Statistic.h"
39 #include "llvm/IR/CFG.h"
40 #include "llvm/IR/IRBuilder.h"
41 #include "llvm/IR/LLVMContext.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/raw_ostream.h"
46 using namespace llvm::objcarc;
48 #define DEBUG_TYPE "objc-arc-opts"
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object whose multiple uses are all trivial,
  // we can still consider this a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V,
                                                   const DataLayout &DL) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObjCPtr(P, DL);

    if (isa<AllocaInst>(P))
      return true;

    if (!Visited.insert(P).second)
      continue;

    if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }
  } while (!Worklist.empty());

  return false;
}

/// @}
///
/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
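//
// A sketch of the two patterns (illustrative IR, not from this file):
//
//   %pool = call i8* @objc_autoreleasePoolPush()
//   ... no autoreleases ...
//   call void @objc_autoreleasePoolPop(i8* %pool)   ; both calls removable
//
//   call i8* @objc_autorelease(i8* %x)
//   call void @objc_autoreleasePoolPop(i8* %pool)   ; autorelease -> release
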
// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops,        "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets,         "Number of return value forwarding "
                           "retain+autoreleases eliminated");
STATISTIC(NumRRs,          "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,        "Number of calls peephole-optimized");
STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
namespace {

  /// \brief Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    BlotMapVector<const Value *, TopDownPtrState> PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    BlotMapVector<const Value *, BottomUpPtrState> PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges
    /// and ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;

    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    static const unsigned OverflowOccurredValue;

    BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }

    typedef decltype(PerPtrTopDown)::iterator top_down_ptr_iterator;
    typedef decltype(PerPtrTopDown)::const_iterator const_top_down_ptr_iterator;

    top_down_ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    top_down_ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    const_top_down_ptr_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    const_top_down_ptr_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }
    bool hasTopDownPtrs() const {
      return !PerPtrTopDown.empty();
    }

    typedef decltype(PerPtrBottomUp)::iterator bottom_up_ptr_iterator;
    typedef decltype(
        PerPtrBottomUp)::const_iterator const_bottom_up_ptr_iterator;

    bottom_up_ptr_iterator bottom_up_ptr_begin() {
      return PerPtrBottomUp.begin();
    }
    bottom_up_ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    const_bottom_up_ptr_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    const_bottom_up_ptr_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }
    bool hasBottomUpPtrs() const {
      return !PerPtrBottomUp.empty();
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit
    /// by definition.
    void SetAsExit() { BottomUpPathCount = 1; }

    /// Attempt to find the PtrState object describing the top down state for
    /// pointer Arg. Return a new initialized PtrState describing the top down
    /// state for Arg if we do not find one.
    TopDownPtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg. Return a new initialized PtrState describing the bottom up
    /// state for Arg if we do not find one.
    BottomUpPtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    /// Attempt to find the PtrState object describing the bottom up state for
    /// pointer Arg.
    bottom_up_ptr_iterator findPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp.find(Arg);
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Compute the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    ///
    /// Returns true if overflow occurred. Returns false if overflow did not
    /// occur.
    bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
      if (TopDownPathCount == OverflowOccurredValue ||
          BottomUpPathCount == OverflowOccurredValue)
        return true;
      unsigned long long Product =
        (unsigned long long)TopDownPathCount*BottomUpPathCount;
      // Overflow occurred if any of the upper bits of Product are set or if
      // all of the lower bits of Product are set.
      return (Product >> 32) ||
             ((PathCount = Product) == OverflowOccurredValue);
    }
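    // For example, in a diamond CFG (entry -> A, entry -> B, A -> exit,
    // B -> exit), blocks A and B each have TopDownPathCount = 1 and
    // BottomUpPathCount = 1, while entry has TopDownPathCount = 1 and
    // BottomUpPathCount = 2; so two unique entry-to-exit paths pass through
    // entry and one passes through each of A and B.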
    // Specialized CFG utilities.
    typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
    edge_iterator pred_begin() const { return Preds.begin(); }
    edge_iterator pred_end() const { return Preds.end(); }
    edge_iterator succ_begin() const { return Succs.begin(); }
    edge_iterator succ_end() const { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };

  const unsigned BBState::OverflowOccurredValue = 0xffffffff;
}

namespace llvm {
raw_ostream &operator<<(raw_ostream &OS, BBState &BBState);
}
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // To be consistent, we clear the top-down pointers when, by the addition,
  // TopDownPathCount becomes OverflowOccurredValue, even though "true"
  // overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same
  // key, merge the entries. Otherwise, copy the entry and merge it with an
  // empty entry.
  for (auto MI = Other.top_down_ptr_begin(), ME = Other.top_down_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? TopDownPtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (auto MI = top_down_ptr_begin(), ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(TopDownPtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // To be consistent, we clear the bottom-up pointers when, by the addition,
  // BottomUpPathCount becomes OverflowOccurredValue, even though "true"
  // overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (auto MI = Other.bottom_up_ptr_begin(), ME = Other.bottom_up_ptr_end();
       MI != ME; ++MI) {
    auto Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? BottomUpPtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (auto MI = bottom_up_ptr_begin(), ME = bottom_up_ptr_end(); MI != ME;
       ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(BottomUpPtrState(), /*TopDown=*/false);
}
raw_ostream &llvm::operator<<(raw_ostream &OS, BBState &BBInfo) {
  // Dump the pointers we are tracking.
  OS << "    TopDown State:\n";
  if (!BBInfo.hasTopDownPtrs()) {
    DEBUG(llvm::dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.top_down_ptr_begin(), E = BBInfo.top_down_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
         << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
         << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  OS << "    BottomUp State:\n";
  if (!BBInfo.hasBottomUpPtrs()) {
    DEBUG(llvm::dbgs() << "        NONE!\n");
  } else {
    for (auto I = BBInfo.bottom_up_ptr_begin(), E = BBInfo.bottom_up_ptr_end();
         I != E; ++I) {
      const PtrState &P = I->second;
      OS << "        Ptr: " << *I->first
         << "\n            KnownSafe:        " << (P.IsKnownSafe()?"true":"false")
         << "\n            ImpreciseRelease: "
         << (P.IsTrackingImpreciseReleases()?"true":"false") << "\n"
         << "            HasCFGHazards:    "
         << (P.IsCFGHazardAfflicted()?"true":"false") << "\n"
         << "            KnownPositive:    "
         << (P.HasKnownPositiveRefCount()?"true":"false") << "\n"
         << "            Seq:              "
         << P.GetSeq() << "\n";
    }
  }

  return OS;
}
namespace {

  /// \brief The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;

    /// A cache of references to runtime entry point constants.
    ARCRuntimeEntryPoints EP;

    /// A cache of MDKinds that can be passed into other functions to propagate
    /// MDKind identifiers.
    ARCMDKindCache MDKindCache;

    // This is used to track if a pointer is stored into an alloca.
    DenseSet<const Value *> MultiOwnersSet;

    /// A flag indicating whether this optimization pass should run.
    bool Run;

    /// Flags which determine whether each of the interesting runtime functions
    /// is in fact used in the current function.
    unsigned UsedInThisFunction;

    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                   ARCInstKind &Class);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst, BasicBlock *BB,
                                  BlotMapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F, DenseMap<const BasicBlock *, BBState> &BBStates,
               BlotMapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   BlotMapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts, Module *M);

    bool
    PairUpRetainsAndReleases(DenseMap<const BasicBlock *, BBState> &BBStates,
                             BlotMapVector<Value *, RRInfo> &Retains,
                             DenseMap<Value *, RRInfo> &Releases, Module *M,
                             SmallVectorImpl<Instruction *> &NewRetains,
                             SmallVectorImpl<Instruction *> &NewReleases,
                             SmallVectorImpl<Instruction *> &DeadInsts,
                             RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                             Value *Arg, bool KnownSafe,
                             bool &AnyPairsCompletelyEliminated);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              BlotMapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases, Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

#ifndef NDEBUG
    void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

    void getAnalysisUsage(AnalysisUsage &AU) const override;
    bool doInitialization(Module &M) override;
    bool runOnFunction(Function &F) override;
    void releaseMemory() override;

  public:
    static char ID;
    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };
}

char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}
void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
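///
/// For example (an illustrative IR sketch, not from this file), in
///
///   %x = call i8* @returns_autoreleased()
///   %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
///
/// the retainRV stays, since %x is the immediately preceding call's return
/// value; if it were not, the call would be rewritten as plain @objc_retain.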
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicARCInstKind(I) == ARCInstKind::AutoreleaseRV &&
        GetArgRCIdentityRoot(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
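///
/// For example (an illustrative IR sketch, not from this file), in
///
///   %0 = call i8* @objc_autoreleaseReturnValue(i8* %x)
///   ret i8* %0
///
/// the autoreleaseRV is left alone; if %0 did not feed a return (or a
/// retainRV), the call would be rewritten as plain @objc_autorelease.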
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPointKind::Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case ARCInstKind::NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;

    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case ARCInstKind::StoreWeak:
    case ARCInstKind::LoadWeak:
    case ARCInstKind::LoadWeakRetained:
    case ARCInstKind::InitWeak:
    case ARCInstKind::DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::CopyWeak:
    case ARCInstKind::MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                        "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case ARCInstKind::AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }

    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();

        Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
        CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
                                             Call);
        NewCall->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease),
                             MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
                        "since x is otherwise unused.\nOld: " << *Call
                     << "\nNew: " << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = ARCInstKind::Release;
      }
    }

    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << unsigned(Class);
      continue;
    }

    const Value *Arg = GetArgRCIdentityRoot(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                   << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << unsigned(Class);

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
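    //
    // For example (an illustrative IR sketch, not from this file), given
    //
    //   %p = phi i8* [ null, %bb1 ], [ %x, %bb2 ]
    //   call void @objc_release(i8* %p)
    //
    // the release can be cloned into %bb2, where the incoming value is
    // non-null, and the original call deleted, since the null path is a no-op.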
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        const Value *Incoming =
          GetRCIdentityRoot(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case ARCInstKind::Retain:
        case ARCInstKind::RetainBlock:
          // These can always be moved up.
          break;
        case ARCInstKind::Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::RetainRV:
        case ARCInstKind::AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            const Value *Incoming =
              GetRCIdentityRoot(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning " << *CInst << "\n"
                              "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
        }
      }
    } while (!Worklist.empty());
  }
}
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 TopDownPtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        TopDownPtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (auto I = MyStates.top_down_ptr_begin(), E = MyStates.top_down_ptr_end();
       I != E; ++I) {
    TopDownPtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that
    // this code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
        BBStates.find(*SI);
      assert(BBI != BBStates.end());
      const BottomUpPtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If, bottom-up, the pointer is in an S_None state, clear the sequence
      // progress, since the sequence in the bottom-up state finished,
      // suggesting a mismatch between retains and releases. This is true for
      // all three cases that we are handling here: S_Retain, S_Use, and
      // S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard
      // checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease: {
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      }
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state were it not for the fact that we
      // are known safe, stop code motion. Whether it is safe to remove RR
      // pairs via KnownSafe is an orthogonal concept to whether we are
      // allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}
bool ObjCARCOpt::VisitInstructionBottomUp(
    Instruction *Inst, BasicBlock *BB, BlotMapVector<Value *, RRInfo> &Retains,
    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    NestingDetected |= S.InitBottomUp(MDKindCache, Inst);
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    BottomUpPtrState &S = MyStates.getPtrBottomUpState(Arg);
    if (S.MatchWithRetain()) {
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV) {
        DEBUG(llvm::dbgs() << "        Matching with: " << *Inst << "\n");
        Retains[Inst] = S.GetRRInfo();
      }
      S.ClearSequenceProgress();
    }
    // A retain moving bottom up can be a use.
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These are irrelevant.
    return NestingDetected;
  case ARCInstKind::User:
    // If we have a store into an alloca of a pointer we are tracking, the
    // pointer has multiple owners, implying that we must be more conservative.
    //
    // This comes up in the context of a pointer being ``KnownSafe''. In the
    // presence of a block being initialized, the frontend will emit the
    // objc_retain on the original pointer and the release on the pointer
    // loaded from the alloca. The optimizer, through the provenance analysis,
    // will realize that the two are related, but since we only require
    // KnownSafe in one direction, will match the inner retain on the original
    // pointer with the guard release on the original pointer. This is fixed by
    // ensuring that in the presence of allocas we only unconditionally remove
    // pointers if both our retain and our release are KnownSafe.
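    //
    // For example (an illustrative IR sketch, not from this file):
    //
    //   %a = alloca i8*
    //   %0 = call i8* @objc_retain(i8* %x)
    //   store i8* %x, i8** %a
    //   %y = load i8** %a
    //   call void @objc_release(i8* %y)
    //
    // Here %x and %y share provenance, so this pair may only be removed
    // unconditionally if both ends are KnownSafe.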
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      const DataLayout &DL = BB->getModule()->getDataLayout();
      if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand(), DL)) {
        auto I = MyStates.findPtrBottomUpState(
            GetRCIdentityRoot(SI->getValueOperand()));
        if (I != MyStates.bottom_up_ptr_end())
          MultiOwnersSet.insert(I->first);
      }
    }
    break;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.bottom_up_ptr_begin(),
            ME = MyStates.bottom_up_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    BottomUpPtrState &S = MI->second;

    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(BB, Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}
bool ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BlotMapVector<Value *, RRInfo> &Retains) {

  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");

  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  DEBUG(llvm::dbgs() << "Before:\n" << BBStates[BB] << "\n"
                     << "Performing Dataflow:\n");

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = std::prev(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    DEBUG(dbgs() << "    Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  DEBUG(llvm::dbgs() << "\nFinal State:\n" << BBStates[BB] << "\n");

  return NestingDetected;
}
bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(llvm::dbgs() << "        Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable. We need to break
    // since a retain can be a potential use.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    NestingDetected |= S.InitTopDown(Class, Inst);
    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);
    TopDownPtrState &S = MyStates.getPtrTopDownState(Arg);
    // Try to form a tentative pair in between this release instruction and the
    // top down pointers that we are tracking.
    if (S.MatchWithRelease(MDKindCache, Inst)) {
      // If we succeed, copy S's RRInfo into the Release -> {Retain Set
      // Map}. Then we clear S.
      DEBUG(llvm::dbgs() << "        Matching with: " << *Inst << "\n");
      Releases[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
    }
    break;
  }
  case ARCInstKind::AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return NestingDetected;
  case ARCInstKind::AutoreleasepoolPush:
  case ARCInstKind::None:
    // These cannot be uses of an objc pointer.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (auto MI = MyStates.top_down_ptr_begin(),
            ME = MyStates.top_down_ptr_end();
       MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    TopDownPtrState &S = MI->second;
    if (S.HandlePotentialAlterRefCount(Inst, Ptr, PA, Class))
      continue;

    S.HandlePotentialUse(Inst, Ptr, PA, Class);
  }

  return NestingDetected;
}
bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
                         DenseMap<const BasicBlock *, BBState> &BBStates,
                         DenseMap<Value *, RRInfo> &Releases) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  DEBUG(llvm::dbgs() << "Before:\n" << BBStates[BB] << "\n"
                     << "Performing Dataflow:\n");

  // Visit all the instructions, top-down.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    Instruction *Inst = I;

    DEBUG(dbgs() << "    Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
  }

  DEBUG(llvm::dbgs() << "\nState Before Checking for CFG Hazards:\n"
                     << BBStates[BB] << "\n\n");
  CheckForCFGHazards(BB, BBStates, MyStates);
  DEBUG(llvm::dbgs() << "Final State:\n" << BBStates[BB] << "\n");
  return NestingDetected;
}
static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
    succ_iterator SE(TI, false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB).second) {
        TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
        SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there may also be blocks which we
  // treat as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    BasicBlock *ExitBB = I;
    BBState &MyStates = BBStates[ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
    Visited.insert(ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB).second) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}
// Visit the function both top-down and bottom-up.
bool ObjCARCOpt::Visit(Function &F,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       BlotMapVector<Value *, RRInfo> &Retains,
                       DenseMap<Value *, RRInfo> &Releases) {

  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the
  // ReversePostOrderTraversal class here because we want the reverse-CFG
  // postorder to consider each function exit point, and we want to ignore
  // selected cycle edges.
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    MDKindCache.get(ARCMDKindID::NoObjCARCExceptions),
                    BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
         ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
       I != E; ++I)
    BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
         PostOrder.rbegin(), E = PostOrder.rend();
       I != E; ++I)
    TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);

  return TopDownNestingDetected && BottomUpNestingDetected;
}
/// Move the calls in RetainsToMove and ReleasesToMove.
void ObjCARCOpt::MoveCalls(Value *Arg, RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           BlotMapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");

  // Insert the new retain and release calls.
  for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }
  for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Release);
    CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(MDKindCache.get(ARCMDKindID::ImpreciseRelease), M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
                    "At insertion point: " << *InsertPt << "\n");
  }

  // Delete the original retain and release calls.
  for (Instruction *OrigRetain : RetainsToMove.Calls) {
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
  }
  for (Instruction *OrigRelease : ReleasesToMove.Calls) {
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
  }
}
bool ObjCARCOpt::PairUpRetainsAndReleases(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M,
    SmallVectorImpl<Instruction *> &NewRetains,
    SmallVectorImpl<Instruction *> &NewReleases,
    SmallVectorImpl<Instruction *> &DeadInsts, RRInfo &RetainsToMove,
    RRInfo &ReleasesToMove, Value *Arg, bool KnownSafe,
    bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements unless
  // we are dealing with a retainable object with multiple provenance sources.
  bool KnownSafeTD = true, KnownSafeBU = true;
  bool MultipleOwners = false;
  bool CFGHazardAfflicted = false;

  // Connect the dots between the top-down-collected RetainsToMove and
  // bottom-up-collected ReleasesToMove to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  for (;;) {
    for (SmallVectorImpl<Instruction *>::const_iterator
           NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
      Instruction *NewRetain = *NI;
      auto It = Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      MultipleOwners =
        MultipleOwners || MultiOwnersSet.count(GetArgRCIdentityRoot(NewRetain));
      for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
        auto Jt = Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;

        // If the release does not have a reference to the retain as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewRetainReleaseRRI.Calls.count(NewRetain))
          return false;

        if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta -= PathCount;

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
              NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
              NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = nullptr;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
              if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];
                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta -= PathCount;
              }
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (SmallVectorImpl<Instruction *>::const_iterator
           NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
      Instruction *NewRelease = *NI;
      auto It = Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
      for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
        auto Jt = Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;

        // If the retain does not have a reference to the release as well,
        // something happened which is unaccounted for. Do not do anything.
        //
        // This can happen if we catch an additive overflow during path count
        // merging.
        if (!NewReleaseRetainRRI.Calls.count(NewRelease))
          return false;

        if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
          // If we overflow when we compute the path count, don't remove/move
          // anything.
          const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
          unsigned PathCount = BBState::OverflowOccurredValue;
          if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
            return false;
          assert(PathCount != BBState::OverflowOccurredValue &&
                 "PathCount at this point can not be "
                 "OverflowOccurredValue.");
          OldDelta += PathCount;
          OldCount += PathCount;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
              if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
                // If we overflow when we compute the path count, don't
                // remove/move anything.
                const BBState &RIPBBState = BBStates[RIP->getParent()];

                PathCount = BBState::OverflowOccurredValue;
                if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
                  return false;
                assert(PathCount != BBState::OverflowOccurredValue &&
                       "PathCount at this point can not be "
                       "OverflowOccurredValue.");
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    NewReleases.clear();
    if (NewRetains.empty()) break;
  }

  // We can only remove pointers if we are known safe in both directions.
  bool UnconditionallySafe = KnownSafeTD && KnownSafeBU;
  if (UnconditionallySafe) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewCount = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
    if (NewDelta != 0)
      return false;

    // At this point, we are not going to remove any RR pairs, but we still are
    // able to move RR pairs. If one of our pointers is afflicted with
    // CFGHazards, we cannot perform such code motion, so exit early.
    const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
      ReleasesToMove.ReverseInsertPts.size();
    if (CFGHazardAfflicted && WillPerformCodeMotion)
      return false;
  }

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // them.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}
/// Identify pairings between the retains and releases, and delete and/or move
/// them.
bool ObjCARCOpt::PerformCodePlacement(
    DenseMap<const BasicBlock *, BBState> &BBStates,
    BlotMapVector<Value *, RRInfo> &Retains,
    DenseMap<Value *, RRInfo> &Releases, Module *M) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");

  bool AnyPairsCompletelyEliminated = false;
  RRInfo RetainsToMove;
  RRInfo ReleasesToMove;
  SmallVector<Instruction *, 4> NewRetains;
  SmallVector<Instruction *, 4> NewReleases;
  SmallVector<Instruction *, 8> DeadInsts;
  // Visit each retain.
  for (BlotMapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
                                                      E = Retains.end();
       I != E; ++I) {
    Value *V = I->first;
    if (!V) continue; // blotted

    Instruction *Retain = cast<Instruction>(V);

    DEBUG(dbgs() << "Visiting: " << *Retain << "\n");

    Value *Arg = GetArgRCIdentityRoot(Retain);
    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
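    // For example (illustrative IR), a pointer loaded from a constant global:
    //   @g = constant i8* ...
    //   %obj = load i8*, i8** @g
    // can never be deallocated out from under us, so retain/release pairs on
    // it are removable regardless of the code between them.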
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
            dyn_cast<GlobalVariable>(
              GetRCIdentityRoot(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;
    // Connect the dots between the top-down-collected RetainsToMove and
    // bottom-up-collected ReleasesToMove to form sets of related calls.
    NewRetains.push_back(Retain);
    bool PerformMoveCalls = PairUpRetainsAndReleases(
        BBStates, Retains, Releases, M, NewRetains, NewReleases, DeadInsts,
        RetainsToMove, ReleasesToMove, Arg, KnownSafe,
        AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }
    // Clean up state for next retain.
    NewReleases.clear();
    NewRetains.clear();
    RetainsToMove.clear();
    ReleasesToMove.clear();
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}
/// Weak pointer optimizations.
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");

  // First, do memdep-style RLE and S2L optimizations. We can't use memdep
  // itself because it uses AliasAnalysis and we need to do provenance
  // queries instead.
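  //
  // For example (illustrative IR), redundant load elimination (RLE) can
  // replace the second of two loads of the same weak pointer when nothing in
  // between can modify it:
  //   %x = call i8* @objc_loadWeak(i8** %p)
  //   ; nothing here stores to %p or calls the weak entry points
  //   %y = call i8* @objc_loadWeak(i8** %p)   ; can be replaced with %x
  // Store-to-load forwarding (S2L) similarly forwards the value just written
  // by objc_storeWeak to a following objc_loadWeak of the same pointer.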
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    DEBUG(dbgs() << "Visiting: " << *Inst << "\n");

    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::LoadWeak &&
        Class != ARCInstKind::LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      continue;
    }
    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = std::prev(I);
    BasicBlock *CurrentBB = Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*std::prev(J);
      ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
      switch (EarlierClass) {
      case ARCInstKind::LoadWeak:
      case ARCInstKind::LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that one.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasAnalysis::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case AliasAnalysis::MayAlias:
        case AliasAnalysis::PartialAlias:
          goto clobbered;
        case AliasAnalysis::NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::StoreWeak:
      case ARCInstKind::InitWeak: {
        // If this is storing to the same pointer and has the same size etc.
        // replace this load's value with the stored value.
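        //
        // For example (illustrative IR):
        //   call i8* @objc_storeWeak(i8** %p, i8* %v)
        //   %x = call i8* @objc_loadWeak(i8** %p)
        // %x can be replaced with %v, because objc_storeWeak stores (and
        // returns) its second argument.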
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasAnalysis::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == ARCInstKind::LoadWeakRetained) {
            Constant *Decl = EP.get(ARCRuntimeEntryPointKind::Retain);
            CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case AliasAnalysis::MayAlias:
        case AliasAnalysis::PartialAlias:
          goto clobbered;
        case AliasAnalysis::NoAlias:
          break;
        }
        break;
      }
      case ARCInstKind::MoveWeak:
      case ARCInstKind::CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case ARCInstKind::AutoreleasepoolPush:
      case ARCInstKind::None:
      case ARCInstKind::IntrinsicUser:
      case ARCInstKind::User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }
  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
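  //
  // For example (illustrative IR), a __weak stack variable whose value is
  // never loaded:
  //   %w = alloca i8*
  //   call i8* @objc_initWeak(i8** %w, i8* %x)
  //   call void @objc_destroyWeak(i8** %w)
  // can be deleted entirely, along with all of its weak entry-point users.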
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    ARCInstKind Class = GetBasicARCInstKind(Inst);
    if (Class != ARCInstKind::DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(Inst);
    Value *Arg = Call->getArgOperand(0);
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (User *U : Alloca->users()) {
        const Instruction *UserInst = cast<Instruction>(U);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
        case ARCInstKind::DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
        CallInst *UserInst = cast<CallInst>(*UI++);
        switch (GetBasicARCInstKind(UserInst)) {
        case ARCInstKind::InitWeak:
        case ARCInstKind::StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case ARCInstKind::DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }
}
/// Identify program paths which execute sequences of retains and releases
/// which can be eliminated.
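///
/// For example (illustrative IR), in:
///   %0 = call i8* @objc_retain(i8* %x)
///   ; nothing here can decrement the count or otherwise use %x
///   call void @objc_release(i8* %x)
/// the retain and release can be deleted as a pair.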
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  // Releases, Retains - These are used to store the results of the main flow
  // analysis. These use Value* as the key instead of Instruction* so that the
  // map stays valid when we get around to rewriting code and calls get
  // replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  BlotMapVector<Value *, RRInfo> Retains;

  // This is used during the traversal of the function to track the
  // states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  // Transform.
  bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                           Releases,
                                                           F.getParent());

  // Cleanup.
  MultiOwnersSet.clear();

  return AnyPairsCompletelyEliminated && NestingDetected;
}
/// Check if there is a dependent call earlier that does not have anything in
/// between the Retain and the call that can affect the reference count of their
/// shared pointer argument. Note that Retain need not be in BB.
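///
/// For example (illustrative IR), this holds in:
///   %call = call i8* @something(...)
///   %0 = call i8* @objc_retain(i8* %call)
/// where the call that produces the pointer is the retain's only dependency.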
static bool
HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
                             SmallPtrSetImpl<Instruction *> &DepInsts,
                             SmallPtrSetImpl<const BasicBlock *> &Visited,
                             ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
                   DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return false;

  auto *Call = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that the pointer is the return value of the call.
  if (!Call || Arg != Call)
    return false;

  // Check that the call is a regular call.
  ARCInstKind Class = GetBasicARCInstKind(Call);
  if (Class != ARCInstKind::CallOrUser && Class != ARCInstKind::Call)
    return false;

  return true;
}
/// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count of
/// Arg.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
                                  Instruction *Autorelease,
                                  SmallPtrSetImpl<Instruction *> &DepInsts,
                                  SmallPtrSetImpl<const BasicBlock *> &Visited,
                                  ProvenanceAnalysis &PA) {
  FindDependencies(CanChangeRetainCount, Arg,
                   BB, Autorelease, DepInsts, Visited, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Retain = dyn_cast_or_null<CallInst>(*DepInsts.begin());

  // Check that we found a retain with the same argument.
  if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
      GetArgRCIdentityRoot(Retain) != Arg) {
    return nullptr;
  }

  return Retain;
}
/// Look for an ``autorelease'' instruction dependent on Arg such that there are
/// no instructions dependent on Arg that need a positive ref count in between
/// the autorelease and the ret.
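///
/// For example (illustrative IR), in:
///   %2 = call i8* @objc_autorelease(i8* %1)
///   ret i8* %2
/// the autorelease is the single instruction the returned value depends on,
/// and nothing between it and the ret needs a positive ref count on Arg.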
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       SmallPtrSetImpl<Instruction *> &DepInsts,
                                       SmallPtrSetImpl<const BasicBlock *> &V,
                                       ProvenanceAnalysis &PA) {
  FindDependencies(NeedsPositiveRetainCount, Arg,
                   BB, Ret, DepInsts, V, PA);
  if (DepInsts.size() != 1)
    return nullptr;

  auto *Autorelease = dyn_cast_or_null<CallInst>(*DepInsts.begin());
  if (!Autorelease)
    return nullptr;
  ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
  if (!IsAutorelease(AutoreleaseClass))
    return nullptr;
  if (GetArgRCIdentityRoot(Autorelease) != Arg)
    return nullptr;

  return Autorelease;
}
/// Look for this pattern:
/// \code
///    %call = call i8* @something(...)
///    %2 = call i8* @objc_retain(i8* %call)
///    %3 = call i8* @objc_autorelease(i8* %2)
///    ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");

  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
    BasicBlock *BB = FI;
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
    // Check Ret first; the debug print below dereferences it.
    if (!Ret)
      continue;

    DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

    const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
    // Look for an ``autorelease'' instruction that is a predecessor of Ret and
    // dependent on Arg such that there are no instructions dependent on Arg
    // that need a positive ref count in between the autorelease and Ret.
    CallInst *Autorelease =
      FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
                                             DependingInstructions, Visited,
                                             PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Autorelease)
      continue;

    CallInst *Retain =
      FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
                                        DependingInstructions, Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!Retain)
      continue;
    // Check that there is nothing that can affect the reference count
    // between the retain and the call. Note that Retain need not be in BB.
    bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
                                                          DependingInstructions,
                                                          Visited, PA);
    DependingInstructions.clear();
    Visited.clear();

    if (!HasSafePathToCall)
      continue;

    // If so, we can zap the retain and autorelease.
    Changed = true;
    ++NumRets;
    DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
                 << *Autorelease << "\n");
    EraseInstruction(Retain);
    EraseInstruction(Autorelease);
  }
}
#ifndef NDEBUG
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
  llvm::Statistic &NumRetains =
      AfterOptimization ? NumRetainsAfterOpt : NumRetainsBeforeOpt;
  llvm::Statistic &NumReleases =
      AfterOptimization ? NumReleasesAfterOpt : NumReleasesBeforeOpt;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
#endif
bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;

  // Intuitively, objc_retain and others are nocapture, however in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.
  MDKindCache.init(&M);

  // Initialize our runtime entry point cache.
  EP.init(&M);

  return false;
}
bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;

  DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
        "\n");

  PA.setAA(&getAnalysis<AliasAnalysis>());

#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, false);
  }
#endif
  // This pass performs several distinct transformations. As a compile-time aid
  // when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);
  // Optimizations for weak pointers.
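  // (UsedInThisFunction is a bitmask with one bit per ARCInstKind, computed
  // by OptimizeIndividualCalls above; testing it here keeps this scan from
  // running on functions that never touch weak pointers.)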
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
                            (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
                            (1 << unsigned(ARCInstKind::StoreWeak)) |
                            (1 << unsigned(ARCInstKind::InitWeak)) |
                            (1 << unsigned(ARCInstKind::CopyWeak)) |
                            (1 << unsigned(ARCInstKind::MoveWeak)) |
                            (1 << unsigned(ARCInstKind::DestroyWeak))))
    OptimizeWeakCalls(F);
  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
                            (1 << unsigned(ARCInstKind::RetainRV)) |
                            (1 << unsigned(ARCInstKind::RetainBlock))))
    if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}
  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                            (1 << unsigned(ARCInstKind::AutoreleaseRV))))
    OptimizeReturns(F);

  // Gather statistics after optimization.
#ifndef NDEBUG
  if (AreStatisticsEnabled()) {
    GatherStatistics(F, true);
  }
#endif

  DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {