//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for
/// objects in Objective-C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, pattern matching and replacement of
/// low-level operations with higher-level operations, and numerous minor
/// simplifications.
///
/// This file also defines a simple ARC-aware AliasAnalysis.
///
/// WARNING: This file knows about certain library functions. It recognizes
/// them by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions
/// are used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc-opts"
#include "ObjCARC.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;
/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
/// @{

namespace {
  /// \brief An associative container with fast insertion-order (deterministic)
  /// iteration over its elements. Plus the special blot operation.
  template<class KeyT, class ValueT>
  class MapVector {
    /// Map keys to indices in Vector.
    typedef DenseMap<KeyT, size_t> MapTy;
    MapTy Map;

    /// Keys and values.
    typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
    VectorTy Vector;

  public:
    typedef typename VectorTy::iterator iterator;
    typedef typename VectorTy::const_iterator const_iterator;
    iterator begin() { return Vector.begin(); }
    iterator end() { return Vector.end(); }
    const_iterator begin() const { return Vector.begin(); }
    const_iterator end() const { return Vector.end(); }

#ifdef XDEBUG
    ~MapVector() {
      assert(Vector.size() >= Map.size()); // May differ due to blotting.
      for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
           I != E; ++I) {
        assert(I->second < Vector.size());
        assert(Vector[I->second].first == I->first);
      }
      for (typename VectorTy::const_iterator I = Vector.begin(),
           E = Vector.end(); I != E; ++I)
        assert(!I->first ||
               (Map.count(I->first) &&
                Map[I->first] == size_t(I - Vector.begin())));
    }
#endif

    ValueT &operator[](const KeyT &Arg) {
      std::pair<typename MapTy::iterator, bool> Pair =
        Map.insert(std::make_pair(Arg, size_t(0)));
      if (Pair.second) {
        size_t Num = Vector.size();
        Pair.first->second = Num;
        Vector.push_back(std::make_pair(Arg, ValueT()));
        return Vector[Num].second;
      }
      return Vector[Pair.first->second].second;
    }

    std::pair<iterator, bool>
    insert(const std::pair<KeyT, ValueT> &InsertPair) {
      std::pair<typename MapTy::iterator, bool> Pair =
        Map.insert(std::make_pair(InsertPair.first, size_t(0)));
      if (Pair.second) {
        size_t Num = Vector.size();
        Pair.first->second = Num;
        Vector.push_back(InsertPair);
        return std::make_pair(Vector.begin() + Num, true);
      }
      return std::make_pair(Vector.begin() + Pair.first->second, false);
    }

    const_iterator find(const KeyT &Key) const {
      typename MapTy::const_iterator It = Map.find(Key);
      if (It == Map.end()) return Vector.end();
      return Vector.begin() + It->second;
    }

    /// This is similar to erase, but instead of removing the element from the
    /// vector, it just zeros out the key in the vector. This leaves iterators
    /// intact, but clients must be prepared for zeroed-out keys when iterating.
    void blot(const KeyT &Key) {
      typename MapTy::iterator It = Map.find(Key);
      if (It == Map.end()) return;
      Vector[It->second].first = KeyT();
      Map.erase(It);
    }

    void clear() {
      Vector.clear();
      Map.clear();
    }
  };
}
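// Illustrative sketch (not in the original source): how blot interacts with
// iteration. The element type and the use() helper here are hypothetical.
//
//   MapVector<const Value *, unsigned> MV;
//   MV[A] = 1;
//   MV[B] = 2;
//   MV.blot(A);
//   // Iteration still visits two entries, in insertion order, but the first
//   // now has a default-constructed (null) key:
//   for (MapVector<const Value *, unsigned>::const_iterator I = MV.begin(),
//        E = MV.end(); I != E; ++I)
//     if (I->first)  // Clients must skip blotted entries.
//       use(I->first, I->second);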
/// @}

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to StripPointerCastsAndObjCCalls but it stops as
/// soon as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicInstructionClass(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return 0;
    return Arg;
  }

  // If we found an identifiable object but it has multiple uses, and they are
  // all trivial uses, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
         UI != UE; ++UI) {
      const User *U = *UI;
      if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
        return 0;
    }

    return Arg;
  }

  return 0;
}
/// \brief Test whether the given retainable object pointer escapes.
///
/// This differs from regular escape analysis in that a use as an
/// argument to a call is not considered an escape.
///
static bool DoesRetainableObjPtrEscape(const User *Ptr) {
  DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Target: " << *Ptr << "\n");

  // Walk the def-use chains.
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(Ptr);
  // If Ptr has any operands add them as well.
  for (User::const_op_iterator I = Ptr->op_begin(), E = Ptr->op_end();
       I != E; ++I) {
    Worklist.push_back(*I);
  }

  // Ensure we do not visit any value twice.
  SmallPtrSet<const Value *, 8> VisitedSet;

  do {
    const Value *V = Worklist.pop_back_val();

    DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Visiting: " << *V << "\n");

    for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      const User *UUser = *UI;

      DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User: " << *UUser << "\n");

      // Special - Use by a call (callee or argument) is not considered
      // to be an escape.
      switch (GetBasicInstructionClass(UUser)) {
      case IC_StoreWeak:
      case IC_InitWeak:
      case IC_StoreStrong:
      case IC_Autorelease:
      case IC_AutoreleaseRV: {
        DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User copies pointer "
              "arguments. Pointer escapes!\n");
        // These special functions make copies of their pointer arguments.
        return true;
      }
      case IC_User:
      case IC_None:
        // Use by an instruction which copies the value is an escape if the
        // result is an escape.
        if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
            isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {

          if (VisitedSet.insert(UUser)) {
            DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User copies value. "
                  "Escapes if result escapes. Adding to list.\n");
            Worklist.push_back(UUser);
          } else {
            DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Already visited node.\n");
          }
          continue;
        }
        // Use by a load is not an escape.
        if (isa<LoadInst>(UUser))
          continue;
        // Use by a store is not an escape if the use is the address.
        if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
          if (V != SI->getValueOperand())
            continue;
        break;
      default:
        // Regular calls and other stuff are not considered escapes.
        continue;
      }
      // Otherwise, conservatively assume an escape.
      DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Assuming block escapes.\n");
      return true;
    }
  } while (!Worklist.empty());

  // No escapes found.
  DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Block does not escape.\n");
  return false;
}
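// Illustrative sketch (not in the original source): hypothetical IR showing
// the distinction this analysis draws for a retainable pointer %p.
//
//   %v = load i8** %slot          ; use by a load: not an escape
//   call void @foo(i8* %p)        ; use as a call argument: not an escape
//                                 ; here, unlike in regular escape analysis
//   store i8* %p, i8** @global    ; %p is the stored value: escapes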
/// @}

/// \defgroup ARCOpt ARC Optimization.
/// @{
// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be Interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops,       "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases,"Number of autoreleases converted to releases");
STATISTIC(NumRets,        "Number of return value forwarding "
                          "retain+autoreleases eliminated");
STATISTIC(NumRRs,         "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,       "Number of calls peephole-optimized");
namespace {
  /// \brief A sequence of states that a pointer may go through in which an
  /// objc_retain and objc_release are actually needed.
  enum Sequence {
    S_None,
    S_Retain,         ///< objc_retain(x)
    S_CanRelease,     ///< foo(x) -- x could possibly see a ref count decrement
    S_Use,            ///< any use of x
    S_Stop,           ///< like S_Release, but code motion is stopped
    S_Release,        ///< objc_release(x)
    S_MovableRelease  ///< objc_release(x), !clang.imprecise_release
  };
}
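// Illustrative sketch (not in the original source): in the top-down
// direction, a pointer typically walks these states in order:
//
//   call i8* @objc_retain(i8* %x)    ; S_Retain
//   call void @callee()              ; S_CanRelease -- callee might decrement
//   call void @use(i8* %x)           ; S_Use
//   call void @objc_release(i8* %x)  ; completes a candidate retain/release pair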
static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
  // The easy cases.
  if (A == B)
    return A;
  if (A == S_None || B == S_None)
    return S_None;

  if (A > B) std::swap(A, B);
  if (TopDown) {
    // Choose the side which is further along in the sequence.
    if ((A == S_Retain || A == S_CanRelease) &&
        (B == S_CanRelease || B == S_Use))
      return B;
  } else {
    // Choose the side which is further along in the sequence.
    if ((A == S_Use || A == S_CanRelease) &&
        (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
      return A;
    // If both sides are releases, choose the more conservative one.
    if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
      return A;
    if (A == S_Release && B == S_MovableRelease)
      return A;
  }

  return S_None;
}
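// Worked examples (not in the original source), bottom-up direction
// (TopDown == false):
//   MergeSeqs(S_Stop, S_MovableRelease, false) == S_Stop  (more conservative)
//   MergeSeqs(S_Use, S_Release, false)         == S_Use   (further along)
//   MergeSeqs(S_Retain, S_Use, false)          == S_None  (no rule applies)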
namespace {
  /// \brief Unidirectional information about either a
  /// retain-decrement-use-release sequence or release-use-decrement-retain
  /// reverse sequence.
  struct RRInfo {
    /// After an objc_retain, the reference count of the referenced
    /// object is known to be positive. Similarly, before an objc_release, the
    /// reference count of the referenced object is known to be positive. If
    /// there are retain-release pairs in code regions where the retain count
    /// is known to be positive, they can be eliminated, regardless of any side
    /// effects between them.
    ///
    /// Also, a retain+release pair nested within another retain+release
    /// pair all on the known same pointer value can be eliminated, regardless
    /// of any intervening side effects.
    ///
    /// KnownSafe is true when either of these conditions is satisfied.
    bool KnownSafe;
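    // Illustrative sketch (not in the original source): a nested pair that
    // KnownSafe covers -- the inner pair executes while the count is known
    // positive, so it can be removed regardless of what @stuff does:
    //
    //   call i8* @objc_retain(i8* %x)
    //   call i8* @objc_retain(i8* %x)    ; inner retain
    //   call void @stuff()
    //   call void @objc_release(i8* %x)  ; inner release
    //   call void @objc_release(i8* %x)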

    /// True if the Calls are objc_retainBlock calls (as opposed to objc_retain
    /// calls).
    bool IsRetainBlock;

    /// True if the objc_release calls are all marked with the "tail" keyword.
    bool IsTailCallRelease;

    /// If the Calls are objc_release calls and they all have a
    /// clang.imprecise_release tag, this is the metadata tag.
    MDNode *ReleaseMetadata;

    /// For a top-down sequence, the set of objc_retains or
    /// objc_retainBlocks. For bottom-up, the set of objc_releases.
    SmallPtrSet<Instruction *, 2> Calls;

    /// The set of optimal insert positions for moving calls in the opposite
    /// sequence.
    SmallPtrSet<Instruction *, 2> ReverseInsertPts;

    RRInfo() :
      KnownSafe(false), IsRetainBlock(false),
      IsTailCallRelease(false),
      ReleaseMetadata(0) {}

    void clear();
  };
}
void RRInfo::clear() {
  KnownSafe = false;
  IsRetainBlock = false;
  IsTailCallRelease = false;
  ReleaseMetadata = 0;
  Calls.clear();
  ReverseInsertPts.clear();
}
namespace {
  /// \brief This class summarizes several per-pointer runtime properties which
  /// are propagated through the flow graph.
  class PtrState {
    /// True if the reference count is known to be incremented.
    bool KnownPositiveRefCount;

    /// True if we've seen an opportunity for partial RR elimination, such as
    /// pushing calls into a CFG triangle or into one side of a CFG diamond.
    bool Partial;

    /// The current position in the sequence.
    Sequence Seq : 8;

  public:
    /// Unidirectional information about the current sequence.
    ///
    /// TODO: Encapsulate this better.
    RRInfo RRI;

    PtrState() : KnownPositiveRefCount(false), Partial(false),
                 Seq(S_None) {}

    void SetKnownPositiveRefCount() {
      KnownPositiveRefCount = true;
    }

    void ClearRefCount() {
      KnownPositiveRefCount = false;
    }

    bool IsKnownIncremented() const {
      return KnownPositiveRefCount;
    }

    void SetSeq(Sequence NewSeq) {
      Seq = NewSeq;
    }

    Sequence GetSeq() const {
      return Seq;
    }

    void ClearSequenceProgress() {
      ResetSequenceProgress(S_None);
    }

    void ResetSequenceProgress(Sequence NewSeq) {
      Seq = NewSeq;
      Partial = false;
      RRI.clear();
    }

    void Merge(const PtrState &Other, bool TopDown);
  };
}
void
PtrState::Merge(const PtrState &Other, bool TopDown) {
  Seq = MergeSeqs(Seq, Other.Seq, TopDown);
  KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount;

  // We can't merge a plain objc_retain with an objc_retainBlock.
  if (RRI.IsRetainBlock != Other.RRI.IsRetainBlock)
    Seq = S_None;

  // If we're not in a sequence (anymore), drop all associated state.
  if (Seq == S_None) {
    Partial = false;
    RRI.clear();
  } else if (Partial || Other.Partial) {
    // If we're doing a merge on a path that's previously seen a partial
    // merge, conservatively drop the sequence, to avoid doing partial
    // RR elimination. If the branch predicates for the two merges differ,
    // mixing them is unsafe.
    ClearSequenceProgress();
  } else {
    // Conservatively merge the ReleaseMetadata information.
    if (RRI.ReleaseMetadata != Other.RRI.ReleaseMetadata)
      RRI.ReleaseMetadata = 0;

    RRI.KnownSafe = RRI.KnownSafe && Other.RRI.KnownSafe;
    RRI.IsTailCallRelease = RRI.IsTailCallRelease &&
                            Other.RRI.IsTailCallRelease;
    RRI.Calls.insert(Other.RRI.Calls.begin(), Other.RRI.Calls.end());

    // Merge the insert point sets. If there are any differences,
    // that makes this a partial merge.
    Partial = RRI.ReverseInsertPts.size() != Other.RRI.ReverseInsertPts.size();
    for (SmallPtrSet<Instruction *, 2>::const_iterator
         I = Other.RRI.ReverseInsertPts.begin(),
         E = Other.RRI.ReverseInsertPts.end(); I != E; ++I)
      Partial |= RRI.ReverseInsertPts.insert(*I);
  }
}
namespace {
  /// \brief Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount;

    /// A type for PerPtrTopDown and PerPtrBottomUp.
    typedef MapVector<const Value *, PtrState> MapTy;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    MapTy PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    MapTy PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges
    /// and ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;
    /// Effective successors of the current block ignoring ignorable edges and
    /// ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    BBState() : TopDownPathCount(0), BottomUpPathCount(0) {}

    typedef MapTy::iterator ptr_iterator;
    typedef MapTy::const_iterator ptr_const_iterator;

    ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    ptr_const_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    ptr_const_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }

    ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
    ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    ptr_const_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    ptr_const_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit
    /// by definition.
    void SetAsExit() { BottomUpPathCount = 1; }

    PtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    PtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Return the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    unsigned GetAllPathCount() const {
      assert(TopDownPathCount != 0);
      assert(BottomUpPathCount != 0);
      return TopDownPathCount * BottomUpPathCount;
    }
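    // Illustrative sketch (not in the original source): for a simple diamond
    //
    //      entry
    //      /   \           TopDownPathCount:  entry=1, A=1, B=1, merge=2
    //     A     B          BottomUpPathCount: entry=2, A=1, B=1, merge=1
    //      \   /
    //      merge
    //
    // GetAllPathCount() is 1*2 = 2 for entry and merge, and 1*1 = 1 for A
    // and B, since only one of the two entry-to-exit paths crosses A.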
    // Specialized CFG utilities.
    typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
    edge_iterator pred_begin() { return Preds.begin(); }
    edge_iterator pred_end() { return Preds.end(); }
    edge_iterator succ_begin() { return Succs.begin(); }
    edge_iterator succ_end() { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };
}
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same
  // key, merge the entries. Otherwise, copy the entry and merge it with an
  // empty entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}
namespace {
  /// \brief The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;

    /// A flag indicating whether this optimization pass should run.
    bool Run;

    /// Declarations for ObjC runtime functions, for use in creating calls to
    /// them. These are initialized lazily to avoid cluttering up the Module
    /// with unused declarations.

    /// Declaration for ObjC runtime function
    /// objc_retainAutoreleasedReturnValue.
    Constant *RetainRVCallee;
    /// Declaration for ObjC runtime function objc_autoreleaseReturnValue.
    Constant *AutoreleaseRVCallee;
    /// Declaration for ObjC runtime function objc_release.
    Constant *ReleaseCallee;
    /// Declaration for ObjC runtime function objc_retain.
    Constant *RetainCallee;
    /// Declaration for ObjC runtime function objc_retainBlock.
    Constant *RetainBlockCallee;
    /// Declaration for ObjC runtime function objc_autorelease.
    Constant *AutoreleaseCallee;

    /// Flags which determine whether each of the interesting runtime functions
    /// is in fact used in the current function.
    unsigned UsedInThisFunction;

    /// The Metadata Kind for clang.imprecise_release metadata.
    unsigned ImpreciseReleaseMDKind;

    /// The Metadata Kind for clang.arc.copy_on_escape metadata.
    unsigned CopyOnEscapeMDKind;

    /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
    unsigned NoObjCARCExceptionsMDKind;

    Constant *getRetainRVCallee(Module *M);
    Constant *getAutoreleaseRVCallee(Module *M);
    Constant *getReleaseCallee(Module *M);
    Constant *getRetainCallee(Module *M);
    Constant *getRetainBlockCallee(Module *M);
    Constant *getAutoreleaseCallee(Module *M);

    bool IsRetainBlockOptimizable(const Instruction *Inst);

    void OptimizeRetainCall(Function &F, Instruction *Retain);
    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                   InstructionClass &Class);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst,
                                  BasicBlock *BB,
                                  MapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       MapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F,
               DenseMap<const BasicBlock *, BBState> &BBStates,
               MapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   MapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts,
                   Module *M);

    bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                               MapVector<Value *, RRInfo> &Retains,
                               DenseMap<Value *, RRInfo> &Releases,
                               Module *M,
                               SmallVector<Instruction *, 4> &NewRetains,
                               SmallVector<Instruction *, 4> &NewReleases,
                               SmallVector<Instruction *, 8> &DeadInsts,
                               RRInfo &RetainsToMove,
                               RRInfo &ReleasesToMove,
                               Value *Arg,
                               bool KnownSafe,
                               bool &AnyPairsCompletelyEliminated);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              MapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases,
                              Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual bool doInitialization(Module &M);
    virtual bool runOnFunction(Function &F);
    virtual void releaseMemory();

  public:
    static char ID;
    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };
}
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}
void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
  // Without the magic metadata tag, we have to assume this might be an
  // objc_retainBlock call inserted to convert a block pointer to an id,
  // in which case it really is needed.
  if (!Inst->getMetadata(CopyOnEscapeMDKind))
    return false;

  // If the pointer "escapes" (not including being used in a call),
  // the copy may be needed.
  if (DoesRetainableObjPtrEscape(Inst))
    return false;

  // Otherwise, it's not needed.
  return true;
}
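// Illustrative sketch (not in the original source): the metadata this test
// keys on, as attached to copy-on-escape objc_retainBlock calls.
//
//   %1 = call i8* @objc_retainBlock(i8* %block), !clang.arc.copy_on_escape !0
//
// Without the tag, or if the block pointer escapes, the call may be doing a
// required stack-to-heap copy and is left alone.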
Constant *ObjCARCOpt::getRetainRVCallee(Module *M) {
  if (!RetainRVCallee) {
    LLVMContext &C = M->getContext();
    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
    Type *Params[] = { I8X };
    FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    RetainRVCallee =
      M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy,
                             Attribute);
  }
  return RetainRVCallee;
}
Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
  if (!AutoreleaseRVCallee) {
    LLVMContext &C = M->getContext();
    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
    Type *Params[] = { I8X };
    FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseRVCallee =
      M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
                             Attribute);
  }
  return AutoreleaseRVCallee;
}
Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
  if (!ReleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    ReleaseCallee =
      M->getOrInsertFunction(
        "objc_release",
        FunctionType::get(Type::getVoidTy(C), Params, /*isVarArg=*/false),
        Attribute);
  }
  return ReleaseCallee;
}
Constant *ObjCARCOpt::getRetainCallee(Module *M) {
  if (!RetainCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    RetainCallee =
      M->getOrInsertFunction(
        "objc_retain",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return RetainCallee;
}
Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
  if (!RetainBlockCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    // objc_retainBlock is not nounwind because it calls user copy constructors
    // which could theoretically throw.
    RetainBlockCallee =
      M->getOrInsertFunction(
        "objc_retainBlock",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        AttributeSet());
  }
  return RetainBlockCallee;
}
Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
  if (!AutoreleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseCallee =
      M->getOrInsertFunction(
        "objc_autorelease",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return AutoreleaseCallee;
}
/// Turn objc_retain into objc_retainAutoreleasedReturnValue if the operand is
/// a return value.
void
ObjCARCOpt::OptimizeRetainCall(Function &F, Instruction *Retain) {
  ImmutableCallSite CS(GetObjCArg(Retain));
  const Instruction *Call = CS.getInstruction();
  if (!Call) return;
  if (Call->getParent() != Retain->getParent()) return;

  // Check that the call is next to the retain.
  BasicBlock::const_iterator I = Call;
  ++I;
  while (isNoopInstruction(I)) ++I;
  if (&*I != Retain)
    return;

  // Turn it into an objc_retainAutoreleasedReturnValue.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainCall: Transforming "
                  "objc_retain => objc_retainAutoreleasedReturnValue"
                  " since the operand is a return value.\n"
                  "                                Old: "
               << *Retain << "\n");

  cast<CallInst>(Retain)->setCalledFunction(getRetainRVCallee(F.getParent()));

  DEBUG(dbgs() << "                                New: "
               << *Retain << "\n");
}
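// Illustrative sketch (not in the original source): the rewrite performed
// above, in pseudo-IR.
//
//   %call = call i8* @something()
//   %0 = call i8* @objc_retain(i8* %call)
// becomes
//   %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)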
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetObjCArg(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (isNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (isNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && isNoopInstruction(I));
    if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
        GetObjCArg(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainRVCall: Erasing " << *I << "\n"
                   << "                                  Erasing " << *RetainRV
                   << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it into a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainRVCall: Transforming "
                  "objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "                                  Old: "
               << *RetainRV << "\n");

  cast<CallInst>(RetainRV)->setCalledFunction(getRetainCallee(F.getParent()));

  DEBUG(dbgs() << "                                  New: "
               << *RetainRV << "\n");

  return false;
}
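// Illustrative sketch (not in the original source): the pair-deletion case
// above, in pseudo-IR.
//
//   %0 = call i8* @objc_autoreleaseReturnValue(i8* %p)
//   %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %p)
//
// With only no-op instructions between them, the two calls cancel out and
// both are erased (returning true to the caller).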
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void
ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                      InstructionClass &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetObjCArg(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
         UI != UE; ++UI) {
      const User *I = *UI;
      if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
        return;
      if (isa<BitCastInst>(I))
        Users.push_back(I);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeAutoreleaseRVCall: Transforming "
                  "objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "                                       Old: "
               << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  AutoreleaseRVCI->
    setCalledFunction(getAutoreleaseCallee(F.getParent()));
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = IC_Autorelease;

  DEBUG(dbgs() << "                                       New: "
               << *AutoreleaseRV << "\n");
}
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    InstructionClass Class = GetBasicInstructionClass(Inst);

    DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Visiting: Class: "
          << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case IC_NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Erasing no-op cast:"
                      " " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;
    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case IC_InitWeak:
    case IC_LoadWeak:
    case IC_LoadWeakRetained:
    case IC_StoreWeak:
    case IC_DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (isNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: A null "
                        "pointer-to-weak-pointer is undefined behavior.\n"
                        "                                     Old = " << *CI <<
                        "\n                                     New = " <<
                        *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_CopyWeak:
    case IC_MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (isNullOrUndef(CI->getArgOperand(0)) ||
          isNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: A null "
                        "pointer-to-weak-pointer is undefined behavior.\n"
                        "                                     Old = " << *CI <<
                        "\n                                     New = " <<
                        *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_Retain:
      OptimizeRetainCall(F, Inst);
      break;
    case IC_RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case IC_AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }
    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();
        CallInst *NewCall =
          CallInst::Create(getReleaseCallee(F.getParent()),
                           Call->getArgOperand(0), "", Call);
        NewCall->setMetadata(ImpreciseReleaseMDKind,
                             MDNode::get(C, ArrayRef<Value *>()));

        DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Replacing "
                        "objc_autorelease(x) with objc_release(x) since x is "
                        "otherwise unused.\n"
                        "                                     Old: " << *Call <<
                        "\n                                     New: " <<
                        *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = IC_Release;
      }
    }
    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Adding tail keyword"
            " to function since it can never be passed stack args: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Removing tail "
            "keyword from function: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Found no throw"
            " class. Setting nounwind on: " << *Inst << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << Class;
      continue;
    }
    const Value *Arg = GetObjCArg(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (isNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: ARC calls with "
            "null are no-ops. Erasing: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << Class;

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
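    // Illustrative sketch (not in the original source), in pseudo-IR:
    //
    //   %x = phi i8* [ %p, %bb1 ], [ null, %bb2 ]
    //   call i8* @objc_retain(i8* %x)
    //
    // The retain is a no-op along the null path, so (absent critical edges
    // and intervening side effects) it can be cloned into %bb1 alone and the
    // original call deleted, as the code below does.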
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (PN) {
        // Determine if the PHI has any null operands, or any incoming
        // critical edges.
        bool HasNull = false;
        bool HasCriticalEdges = false;
        for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
          Value *Incoming =
            StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
          if (isNullOrUndef(Incoming))
            HasNull = true;
          else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                     .getNumSuccessors() != 1) {
            HasCriticalEdges = true;
            break;
          }
        }
        // If we have null operands and no critical edges, optimize.
        if (!HasCriticalEdges && HasNull) {
          SmallPtrSet<Instruction *, 4> DependingInstructions;
          SmallPtrSet<const BasicBlock *, 4> Visited;

          // Check that there is nothing that cares about the reference
          // count between the call and the phi.
          switch (Class) {
          case IC_Retain:
          case IC_RetainBlock:
            // These can always be moved up.
            break;
          case IC_Release:
            // These can't be moved across things that care about the retain
            // count.
            FindDependencies(NeedsPositiveRetainCount, Arg,
                             Inst->getParent(), Inst,
                             DependingInstructions, Visited, PA);
            break;
          case IC_Autorelease:
            // These can't be moved across autorelease pool scope boundaries.
            FindDependencies(AutoreleasePoolBoundary, Arg,
                             Inst->getParent(), Inst,
                             DependingInstructions, Visited, PA);
            break;
          case IC_RetainRV:
          case IC_AutoreleaseRV:
            // Don't move these; the RV optimization depends on the autoreleaseRV
            // being tail called, and the retainRV being immediately after a call
            // (which might still happen if we get lucky with codegen layout, but
            // it's not worth taking the chance).
            continue;
          default:
            llvm_unreachable("Invalid dependence flavor");
          }
          if (DependingInstructions.size() == 1 &&
              *DependingInstructions.begin() == PN) {
            Changed = true;
            ++NumPartialNoops;
            // Clone the call into each predecessor that has a non-null value.
            CallInst *CInst = cast<CallInst>(Inst);
            Type *ParamTy = CInst->getArgOperand(0)->getType();
            for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
              Value *Incoming =
                StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
              if (!isNullOrUndef(Incoming)) {
                CallInst *Clone = cast<CallInst>(CInst->clone());
                Value *Op = PN->getIncomingValue(i);
                Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
                if (Op->getType() != ParamTy)
                  Op = new BitCastInst(Op, ParamTy, "", InsertPos);
                Clone->setArgOperand(0, Op);
                Clone->insertBefore(InsertPos);

                DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Cloning "
                             << *CInst << "\n"
                                "                                     And inserting "
                                "clone at " << *InsertPos << "\n");
                Worklist.push_back(std::make_pair(Clone, Incoming));
              }
            }
            // Erase the original call.
            DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
            EraseInstruction(CInst);
            continue;
          }
        }
      }
    } while (!Worklist.empty());
  }

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Finished List.\n");
}
/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
       E = MyStates.top_down_ptr_end(); I != E; ++I)
    switch (I->second.GetSeq()) {
    default: break;
    case S_Use: {
      const Value *Arg = I->first;
      const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
      bool SomeSuccHasSame = false;
      bool AllSuccsHaveSame = true;
      PtrState &S = I->second;
      succ_const_iterator SI(TI), SE(TI, false);

      for (; SI != SE; ++SI) {
        Sequence SuccSSeq = S_None;
        bool SuccSRRIKnownSafe = false;
        // If VisitBottomUp has pointer information for this successor, take
        // what we know about it.
        DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(*SI);
        assert(BBI != BBStates.end());
        const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
        SuccSSeq = SuccS.GetSeq();
        SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
        switch (SuccSSeq) {
        case S_None:
        case S_CanRelease: {
          if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
            S.ClearSequenceProgress();
            break;
          }
          continue;
        }
        case S_Use:
          SomeSuccHasSame = true;
          break;
        case S_Stop:
        case S_Release:
        case S_MovableRelease:
          if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
            AllSuccsHaveSame = false;
          break;
        case S_Retain:
          llvm_unreachable("bottom-up pointer in retain state!");
        }
      }
      // If the state at the other end of any of the successor edges
      // matches the current state, require all edges to match. This
      // guards against loops in the middle of a sequence.
      if (SomeSuccHasSame && !AllSuccsHaveSame)
        S.ClearSequenceProgress();
      break;
    }
    case S_CanRelease: {
      const Value *Arg = I->first;
      const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
      bool SomeSuccHasSame = false;
      bool AllSuccsHaveSame = true;
      PtrState &S = I->second;
      succ_const_iterator SI(TI), SE(TI, false);

      for (; SI != SE; ++SI) {
        Sequence SuccSSeq = S_None;
        bool SuccSRRIKnownSafe = false;
        // If VisitBottomUp has pointer information for this successor, take
        // what we know about it.
        DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(*SI);
        assert(BBI != BBStates.end());
        const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
        SuccSSeq = SuccS.GetSeq();
        SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
        switch (SuccSSeq) {
        case S_None: {
          if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
            S.ClearSequenceProgress();
            break;
          }
          continue;
        }
        case S_CanRelease:
          SomeSuccHasSame = true;
          break;
        case S_Stop:
        case S_Release:
        case S_MovableRelease:
        case S_Use:
          if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
            AllSuccsHaveSame = false;
          break;
        case S_Retain:
          llvm_unreachable("bottom-up pointer in retain state!");
        }
      }
      // If the state at the other end of any of the successor edges
      // matches the current state, require all edges to match. This
      // guards against loops in the middle of a sequence.
      if (SomeSuccHasSame && !AllSuccsHaveSame)
        S.ClearSequenceProgress();
      break;
    }
    }
}
bool
ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
                                     BasicBlock *BB,
                                     MapVector<Value *, RRInfo> &Retains,
                                     BBState &MyStates) {
  bool NestingDetected = false;
  InstructionClass Class = GetInstructionClass(Inst);
  const Value *Arg = 0;

  switch (Class) {
  case IC_Release: {
    Arg = GetObjCArg(Inst);

    PtrState &S = MyStates.getPtrBottomUpState(Arg);

    // If we see two releases in a row on the same pointer, make
    // a note, and we'll circle back to revisit it after we've
    // hopefully eliminated the second release, which may allow us to
    // eliminate the first release too.
    // Theoretically we could implement removal of nested retain+release
    // pairs by making PtrState hold a stack of states, but this is
    // simple and avoids adding overhead for the non-nested case.
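    // Illustrative sketch (not in the original source): scanning bottom-up,
    // the lower release is visited first and puts %x in S_Release; reaching
    // the upper release while still in S_Release sets NestingDetected.
    //
    //   call void @objc_release(i8* %x)  ; upper release -- visited second
    //   ...
    //   call void @objc_release(i8* %x)  ; lower release -- visited first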
    if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
      DEBUG(dbgs() << "ObjCARCOpt::VisitInstructionBottomUp: Found nested "
                      "releases (i.e. a release pair)\n");
      NestingDetected = true;
    }

    MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
    S.ResetSequenceProgress(ReleaseMetadata ? S_MovableRelease : S_Release);
    S.RRI.ReleaseMetadata = ReleaseMetadata;
    S.RRI.KnownSafe = S.IsKnownIncremented();
    S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
    S.RRI.Calls.insert(Inst);

    S.SetKnownPositiveRefCount();
    break;
  }
  case IC_RetainBlock:
    // An objc_retainBlock call with just a use may need to be kept,
    // because it may be copying a block from the stack to the heap.
    if (!IsRetainBlockOptimizable(Inst))
      break;
    // FALLTHROUGH
  case IC_Retain:
  case IC_RetainRV: {
    Arg = GetObjCArg(Inst);

    PtrState &S = MyStates.getPtrBottomUpState(Arg);
    S.SetKnownPositiveRefCount();

    switch (S.GetSeq()) {
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
    case S_Use:
      S.RRI.ReverseInsertPts.clear();
      // FALL THROUGH
    case S_CanRelease:
      // Don't do retain+release tracking for IC_RetainRV, because it's
      // better to let it remain as the first instruction after a call.
      if (Class != IC_RetainRV) {
        S.RRI.IsRetainBlock = Class == IC_RetainBlock;
        Retains[Inst] = S.RRI;
      }
      S.ClearSequenceProgress();
      break;
    case S_None:
      break;
    case S_Retain:
      llvm_unreachable("bottom-up pointer in retain state!");
    }
    return NestingDetected;
  }
  case IC_AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearBottomUpPointers();
    return NestingDetected;
  case IC_AutoreleasepoolPush:
  case IC_None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
       ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    PtrState &S = MI->second;
    Sequence Seq = S.GetSeq();

    // Check for possible releases.
    if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
      S.ClearRefCount();
      switch (Seq) {
      case S_Use:
        S.SetSeq(S_CanRelease);
        continue;
      case S_CanRelease:
      case S_Release:
      case S_MovableRelease:
      case S_Stop:
      case S_None:
        break;
      case S_Retain:
        llvm_unreachable("bottom-up pointer in retain state!");
      }
    }

    // Check for possible direct uses.
    switch (Seq) {
    case S_Release:
    case S_MovableRelease:
      if (CanUse(Inst, Ptr, PA, Class)) {
        assert(S.RRI.ReverseInsertPts.empty());
        // If this is an invoke instruction, we're scanning it as part of
        // one of its successor blocks, since we can't insert code after it
        // in its own block, and we don't want to split critical edges.
        if (isa<InvokeInst>(Inst))
          S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
        else
          S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
        S.SetSeq(S_Use);
      } else if (Seq == S_Release &&
                 (Class == IC_User || Class == IC_CallOrUser)) {
        // Non-movable releases depend on any possible objc pointer use.
        S.SetSeq(S_Stop);
        assert(S.RRI.ReverseInsertPts.empty());
        // As above; handle invoke specially.
        if (isa<InvokeInst>(Inst))
          S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
        else
          S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
      }
      break;
    case S_Stop:
      if (CanUse(Inst, Ptr, PA, Class))
        S.SetSeq(S_Use);
      break;
    case S_CanRelease:
    case S_Use:
    case S_None:
      break;
    case S_Retain:
      llvm_unreachable("bottom-up pointer in retain state!");
    }
  }

  return NestingDetected;
}
bool
ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          MapVector<Value *, RRInfo> &Retains) {
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each successor to compute the initial state
  // for the current block.
  BBState::edge_iterator SI(MyStates.succ_begin()),
                         SE(MyStates.succ_end());
  if (SI != SE) {
    const BasicBlock *Succ = *SI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
    assert(I != BBStates.end());
    MyStates.InitFromSucc(I->second);
    ++SI;
    for (; SI != SE; ++SI) {
      Succ = *SI;
      I = BBStates.find(Succ);
      assert(I != BBStates.end());
      MyStates.MergeSucc(I->second);
    }
  }

  // Visit all the instructions, bottom-up.
  for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
    Instruction *Inst = llvm::prior(I);

    // Invoke instructions are visited as part of their successors (below).
    if (isa<InvokeInst>(Inst))
      continue;

    DEBUG(dbgs() << "ObjCARCOpt::VisitBottomUp: Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
  }

  // If there's a predecessor with an invoke, visit the invoke as if it were
  // part of this block, since we can't insert code after an invoke in its own
  // block, and we don't want to split critical edges.
  for (BBState::edge_iterator PI(MyStates.pred_begin()),
       PE(MyStates.pred_end()); PI != PE; ++PI) {
    BasicBlock *Pred = *PI;
    if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
      NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
  }

  return NestingDetected;
}
bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
                                    DenseMap<Value *, RRInfo> &Releases,
                                    BBState &MyStates) {
  bool NestingDetected = false;
  InstructionClass Class = GetInstructionClass(Inst);
  const Value *Arg = 0;

  switch (Class) {
  case IC_RetainBlock:
    // An objc_retainBlock call with just a use may need to be kept,
    // because it may be copying a block from the stack to the heap.
    if (!IsRetainBlockOptimizable(Inst))
      break;
    // FALLTHROUGH
  case IC_Retain:
  case IC_RetainRV: {
    Arg = GetObjCArg(Inst);

    PtrState &S = MyStates.getPtrTopDownState(Arg);

    // Don't do retain+release tracking for IC_RetainRV, because it's
    // better to let it remain as the first instruction after a call.
    if (Class != IC_RetainRV) {
      // If we see two retains in a row on the same pointer, make
      // a note, and we'll circle back to revisit it after we've
      // hopefully eliminated the second retain, which may allow us to
      // eliminate the first retain too.
      // Theoretically we could implement removal of nested retain+release
      // pairs by making PtrState hold a stack of states, but this is
      // simple and avoids adding overhead for the non-nested case.
      if (S.GetSeq() == S_Retain)
        NestingDetected = true;

      S.ResetSequenceProgress(S_Retain);
      S.RRI.IsRetainBlock = Class == IC_RetainBlock;
      S.RRI.KnownSafe = S.IsKnownIncremented();
      S.RRI.Calls.insert(Inst);
    }

    S.SetKnownPositiveRefCount();

    // A retain can be a potential use; proceed to the generic checking
    // code below.
    break;
  }
  case IC_Release: {
    Arg = GetObjCArg(Inst);

    PtrState &S = MyStates.getPtrTopDownState(Arg);
    S.ClearRefCount();

    switch (S.GetSeq()) {
    case S_Retain:
    case S_CanRelease:
      S.RRI.ReverseInsertPts.clear();
      // FALL THROUGH
    case S_Use:
      S.RRI.ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
      S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
      Releases[Inst] = S.RRI;
      S.ClearSequenceProgress();
      break;
    case S_None:
      break;
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
      llvm_unreachable("top-down pointer in release state!");
    }
    break;
  }
  case IC_AutoreleasepoolPop:
    // Conservatively, clear MyStates for all known pointers.
    MyStates.clearTopDownPointers();
    return NestingDetected;
  case IC_AutoreleasepoolPush:
  case IC_None:
    // These are irrelevant.
    return NestingDetected;
  default:
    break;
  }

  // Consider any other possible effects of this instruction on each
  // pointer being tracked.
  for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
       ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
    const Value *Ptr = MI->first;
    if (Ptr == Arg)
      continue; // Handled above.
    PtrState &S = MI->second;
    Sequence Seq = S.GetSeq();

    // Check for possible releases.
    if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
      S.ClearRefCount();
      switch (Seq) {
      case S_Retain:
        S.SetSeq(S_CanRelease);
        assert(S.RRI.ReverseInsertPts.empty());
        S.RRI.ReverseInsertPts.insert(Inst);

        // One call can't cause a transition from S_Retain to S_CanRelease
        // and S_CanRelease to S_Use. If we've made the first transition,
        // we're done.
        continue;
      case S_Use:
      case S_CanRelease:
      case S_None:
        break;
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        llvm_unreachable("top-down pointer in release state!");
      }
    }

    // Check for possible direct uses.
    switch (Seq) {
    case S_CanRelease:
      if (CanUse(Inst, Ptr, PA, Class))
        S.SetSeq(S_Use);
      break;
    case S_Retain:
    case S_Use:
    case S_None:
      break;
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
      llvm_unreachable("top-down pointer in release state!");
    }
  }

  return NestingDetected;
}
bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
                         DenseMap<const BasicBlock *, BBState> &BBStates,
                         DenseMap<Value *, RRInfo> &Releases) {
  bool NestingDetected = false;
  BBState &MyStates = BBStates[BB];

  // Merge the states from each predecessor to compute the initial state
  // for the current block.
  BBState::edge_iterator PI(MyStates.pred_begin()),
                         PE(MyStates.pred_end());
  if (PI != PE) {
    const BasicBlock *Pred = *PI;
    DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
    assert(I != BBStates.end());
    MyStates.InitFromPred(I->second);
    ++PI;
    for (; PI != PE; ++PI) {
      Pred = *PI;
      I = BBStates.find(Pred);
      assert(I != BBStates.end());
      MyStates.MergePred(I->second);
    }
  }

  // Visit all the instructions, top-down.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    Instruction *Inst = I;

    DEBUG(dbgs() << "ObjCARCOpt::VisitTopDown: Visiting " << *Inst << "\n");

    NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
  }

  CheckForCFGHazards(BB, BBStates, MyStates);
  return NestingDetected;
}
static void
ComputePostOrders(Function &F,
                  SmallVectorImpl<BasicBlock *> &PostOrder,
                  SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
                  unsigned NoObjCARCExceptionsMDKind,
                  DenseMap<const BasicBlock *, BBState> &BBStates) {
  /// The visited set, for doing DFS walks.
  SmallPtrSet<BasicBlock *, 16> Visited;

  // Do DFS, computing the PostOrder.
  SmallPtrSet<BasicBlock *, 16> OnStack;
  SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;

  // Functions always have exactly one entry block, and we don't have
  // any other block that we treat like an entry block.
  BasicBlock *EntryBB = &F.getEntryBlock();
  BBState &MyStates = BBStates[EntryBB];
  MyStates.SetAsEntry();
  TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
  SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
  Visited.insert(EntryBB);
  OnStack.insert(EntryBB);
  do {
  dfs_next_succ:
    BasicBlock *CurrBB = SuccStack.back().first;
    TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
    succ_iterator SE(TI, false);

    while (SuccStack.back().second != SE) {
      BasicBlock *SuccBB = *SuccStack.back().second++;
      if (Visited.insert(SuccBB)) {
        TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
        SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
        BBStates[CurrBB].addSucc(SuccBB);
        BBState &SuccStates = BBStates[SuccBB];
        SuccStates.addPred(CurrBB);
        OnStack.insert(SuccBB);
        goto dfs_next_succ;
      }

      if (!OnStack.count(SuccBB)) {
        BBStates[CurrBB].addSucc(SuccBB);
        BBStates[SuccBB].addPred(CurrBB);
      }
    }
    OnStack.erase(CurrBB);
    PostOrder.push_back(CurrBB);
    SuccStack.pop_back();
  } while (!SuccStack.empty());

  Visited.clear();

  // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
  // Functions may have many exits, and there are also blocks which we treat
  // as exits due to ignored edges.
  SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
  for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
    BasicBlock *ExitBB = I;
    BBState &MyStates = BBStates[ExitBB];
    if (!MyStates.isExit())
      continue;

    MyStates.SetAsExit();

    PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
    Visited.insert(ExitBB);
    while (!PredStack.empty()) {
    reverse_dfs_next_succ:
      BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
      while (PredStack.back().second != PE) {
        BasicBlock *BB = *PredStack.back().second++;
        if (Visited.insert(BB)) {
          PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
          goto reverse_dfs_next_succ;
        }
      }
      ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
    }
  }
}
// Visit the function both top-down and bottom-up.
bool
ObjCARCOpt::Visit(Function &F,
                  DenseMap<const BasicBlock *, BBState> &BBStates,
                  MapVector<Value *, RRInfo> &Retains,
                  DenseMap<Value *, RRInfo> &Releases) {

  // Use reverse-postorder traversals, because we magically know that loops
  // will be well behaved, i.e. they won't repeatedly call retain on a single
  // pointer without doing a release. We can't use the ReversePostOrderTraversal
  // class here because we want the reverse-CFG postorder to consider each
  // function exit point, and we want to ignore selected cycle edges.
  SmallVector<BasicBlock *, 16> PostOrder;
  SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
  ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                    NoObjCARCExceptionsMDKind,
                    BBStates);

  // Use reverse-postorder on the reverse CFG for bottom-up.
  bool BottomUpNestingDetected = false;
  for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
       ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
       I != E; ++I)
    BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);

  // Use reverse-postorder for top-down.
  bool TopDownNestingDetected = false;
  for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
       PostOrder.rbegin(), E = PostOrder.rend();
       I != E; ++I)
    TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);

  return TopDownNestingDetected && BottomUpNestingDetected;
}

/// Move the calls in RetainsToMove and ReleasesToMove.
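/// As a sketch of the effect (hypothetical IR, not from the original source):
/// new objc_retain calls are created at the points recorded in
/// ReleasesToMove.ReverseInsertPts and new objc_release calls at the points
/// in RetainsToMove.ReverseInsertPts, e.g.
///
///   %0 = tail call i8* @objc_retain(i8* %x)
///
/// reappears at each insertion point, while the original paired calls are
/// queued on DeadInsts to be erased by the caller.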
void ObjCARCOpt::MoveCalls(Value *Arg,
                           RRInfo &RetainsToMove,
                           RRInfo &ReleasesToMove,
                           MapVector<Value *, RRInfo> &Retains,
                           DenseMap<Value *, RRInfo> &Releases,
                           SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
  Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));

  // Insert the new retain and release calls.
  for (SmallPtrSet<Instruction *, 2>::const_iterator
       PI = ReleasesToMove.ReverseInsertPts.begin(),
       PE = ReleasesToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
    Instruction *InsertPt = *PI;
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    CallInst *Call =
      CallInst::Create(RetainsToMove.IsRetainBlock ?
                         getRetainBlockCallee(M) : getRetainCallee(M),
                       MyArg, "", InsertPt);
    Call->setDoesNotThrow();
    if (RetainsToMove.IsRetainBlock)
      Call->setMetadata(CopyOnEscapeMDKind,
                        MDNode::get(M->getContext(), ArrayRef<Value *>()));
    else
      Call->setTailCall();

    DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Inserting new Retain: " << *Call
                 << "\n"
                    "                       At insertion point: " << *InsertPt
                 << "\n");
  }
  for (SmallPtrSet<Instruction *, 2>::const_iterator
       PI = RetainsToMove.ReverseInsertPts.begin(),
       PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
    Instruction *InsertPt = *PI;
    Value *MyArg = ArgTy == ParamTy ? Arg :
                   new BitCastInst(Arg, ParamTy, "", InsertPt);
    CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
                                      "", InsertPt);
    // Attach a clang.imprecise_release metadata tag, if appropriate.
    if (MDNode *M = ReleasesToMove.ReleaseMetadata)
      Call->setMetadata(ImpreciseReleaseMDKind, M);
    Call->setDoesNotThrow();
    if (ReleasesToMove.IsTailCallRelease)
      Call->setTailCall();

    DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Inserting new Release: " << *Call
                 << "\n"
                    "                       At insertion point: " << *InsertPt
                 << "\n");
  }

  // Delete the original retain and release calls.
  for (SmallPtrSet<Instruction *, 2>::const_iterator
       AI = RetainsToMove.Calls.begin(),
       AE = RetainsToMove.Calls.end(); AI != AE; ++AI) {
    Instruction *OrigRetain = *AI;
    Retains.blot(OrigRetain);
    DeadInsts.push_back(OrigRetain);
    DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Deleting retain: " << *OrigRetain
                 << "\n");
  }
  for (SmallPtrSet<Instruction *, 2>::const_iterator
       AI = ReleasesToMove.Calls.begin(),
       AE = ReleasesToMove.Calls.end(); AI != AE; ++AI) {
    Instruction *OrigRelease = *AI;
    Releases.erase(OrigRelease);
    DeadInsts.push_back(OrigRelease);
    DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Deleting release: " << *OrigRelease
                 << "\n");
  }
}

bool
ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
                                    &BBStates,
                                  MapVector<Value *, RRInfo> &Retains,
                                  DenseMap<Value *, RRInfo> &Releases,
                                  Module *M,
                                  SmallVector<Instruction *, 4> &NewRetains,
                                  SmallVector<Instruction *, 4> &NewReleases,
                                  SmallVector<Instruction *, 8> &DeadInsts,
                                  RRInfo &RetainsToMove,
                                  RRInfo &ReleasesToMove,
                                  Value *Arg,
                                  bool KnownSafe,
                                  bool &AnyPairsCompletelyEliminated) {
  // If a pair happens in a region where it is known that the reference count
  // is already incremented, we can similarly ignore possible decrements.
  bool KnownSafeTD = true, KnownSafeBU = true;

  // Connect the dots between the top-down-collected RetainsToMove and
  // bottom-up-collected ReleasesToMove to form sets of related calls.
  // This is an iterative process so that we connect multiple releases
  // to multiple retains if needed.
  unsigned OldDelta = 0;
  unsigned NewDelta = 0;
  unsigned OldCount = 0;
  unsigned NewCount = 0;
  bool FirstRelease = true;
  bool FirstRetain = true;
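  // The Old/New Delta and Count variables weigh each call by the number of
  // CFG paths through its block (GetAllPathCount). As a worked example
  // (hypothetical, not from the original source): a retain in a block with
  // path count 2, paired with one release on each arm of a diamond (path
  // count 1 each), gives OldDelta = 2 - 1 - 1 = 0, i.e. the pair is balanced
  // on every path and is a candidate for elimination or motion.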
  for (;;) {
    for (SmallVectorImpl<Instruction *>::const_iterator
           NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
      Instruction *NewRetain = *NI;
      MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
      assert(It != Retains.end());
      const RRInfo &NewRetainRRI = It->second;
      KnownSafeTD &= NewRetainRRI.KnownSafe;
      for (SmallPtrSet<Instruction *, 2>::const_iterator
             LI = NewRetainRRI.Calls.begin(),
             LE = NewRetainRRI.Calls.end(); LI != LE; ++LI) {
        Instruction *NewRetainRelease = *LI;
        DenseMap<Value *, RRInfo>::const_iterator Jt =
          Releases.find(NewRetainRelease);
        if (Jt == Releases.end())
          return false;
        const RRInfo &NewRetainReleaseRRI = Jt->second;
        assert(NewRetainReleaseRRI.Calls.count(NewRetain));
        if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
          OldDelta -=
            BBStates[NewRetainRelease->getParent()].GetAllPathCount();

          // Merge the ReleaseMetadata and IsTailCallRelease values.
          if (FirstRelease) {
            ReleasesToMove.ReleaseMetadata =
              NewRetainReleaseRRI.ReleaseMetadata;
            ReleasesToMove.IsTailCallRelease =
              NewRetainReleaseRRI.IsTailCallRelease;
            FirstRelease = false;
          } else {
            if (ReleasesToMove.ReleaseMetadata !=
                NewRetainReleaseRRI.ReleaseMetadata)
              ReleasesToMove.ReleaseMetadata = 0;
            if (ReleasesToMove.IsTailCallRelease !=
                NewRetainReleaseRRI.IsTailCallRelease)
              ReleasesToMove.IsTailCallRelease = false;
          }

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (SmallPtrSet<Instruction *, 2>::const_iterator
                   RI = NewRetainReleaseRRI.ReverseInsertPts.begin(),
                   RE = NewRetainReleaseRRI.ReverseInsertPts.end();
                 RI != RE; ++RI) {
              Instruction *RIP = *RI;
              if (ReleasesToMove.ReverseInsertPts.insert(RIP))
                NewDelta -= BBStates[RIP->getParent()].GetAllPathCount();
            }
          NewReleases.push_back(NewRetainRelease);
        }
      }
    }
    NewRetains.clear();
    if (NewReleases.empty()) break;

    // Back the other way.
    for (SmallVectorImpl<Instruction *>::const_iterator
           NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
      Instruction *NewRelease = *NI;
      DenseMap<Value *, RRInfo>::const_iterator It =
        Releases.find(NewRelease);
      assert(It != Releases.end());
      const RRInfo &NewReleaseRRI = It->second;
      KnownSafeBU &= NewReleaseRRI.KnownSafe;
      for (SmallPtrSet<Instruction *, 2>::const_iterator
             LI = NewReleaseRRI.Calls.begin(),
             LE = NewReleaseRRI.Calls.end(); LI != LE; ++LI) {
        Instruction *NewReleaseRetain = *LI;
        MapVector<Value *, RRInfo>::const_iterator Jt =
          Retains.find(NewReleaseRetain);
        if (Jt == Retains.end())
          return false;
        const RRInfo &NewReleaseRetainRRI = Jt->second;
        assert(NewReleaseRetainRRI.Calls.count(NewRelease));
        if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
          unsigned PathCount =
            BBStates[NewReleaseRetain->getParent()].GetAllPathCount();
          OldDelta += PathCount;
          OldCount += PathCount;

          // Merge the IsRetainBlock values.
          if (FirstRetain) {
            RetainsToMove.IsRetainBlock = NewReleaseRetainRRI.IsRetainBlock;
            FirstRetain = false;
          } else if (RetainsToMove.IsRetainBlock !=
                     NewReleaseRetainRRI.IsRetainBlock)
            // It's not possible to merge the sequences if one uses
            // objc_retain and the other uses objc_retainBlock.
            return false;

          // Collect the optimal insertion points.
          if (!KnownSafe)
            for (SmallPtrSet<Instruction *, 2>::const_iterator
                   RI = NewReleaseRetainRRI.ReverseInsertPts.begin(),
                   RE = NewReleaseRetainRRI.ReverseInsertPts.end();
                 RI != RE; ++RI) {
              Instruction *RIP = *RI;
              if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
                PathCount = BBStates[RIP->getParent()].GetAllPathCount();
                NewDelta += PathCount;
                NewCount += PathCount;
              }
            }
          NewRetains.push_back(NewReleaseRetain);
        }
      }
    }
    NewReleases.clear();
    if (NewRetains.empty()) break;
  }

  // If the pointer is known incremented or nested, we can safely delete the
  // pair regardless of what's between them.
  if (KnownSafeTD || KnownSafeBU) {
    RetainsToMove.ReverseInsertPts.clear();
    ReleasesToMove.ReverseInsertPts.clear();
    NewDelta = 0;
  } else {
    // Determine whether the new insertion points we computed preserve the
    // balance of retain and release calls through the program.
    // TODO: If the fully aggressive solution isn't valid, try to find a
    // less aggressive solution which is.
    if (NewDelta != 0)
      return false;
  }

  // Determine whether the original call points are balanced in the retain and
  // release calls through the program. If not, conservatively don't touch
  // them.
  // TODO: It's theoretically possible to do code motion in this case, as
  // long as the existing imbalances are maintained.
  if (OldDelta != 0)
    return false;

  Changed = true;
  assert(OldCount != 0 && "Unreachable code?");
  NumRRs += OldCount - NewCount;
  // Set to true if we completely removed any RR pairs.
  AnyPairsCompletelyEliminated = NewCount == 0;

  // We can move calls!
  return true;
}

/// Identify pairings between the retains and releases, and delete and/or move
/// them.
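/// For example (hypothetical IR, not from the original source), a single
/// retain may be paired with a release on each arm of a branch:
///
///   %0 = tail call i8* @objc_retain(i8* %x)
///   br i1 %c, label %t, label %f
/// t:
///   call void @objc_release(i8* %x)
///   br label %join
/// f:
///   call void @objc_release(i8* %x)
///   br label %join
///
/// Both releases join the same set of related calls as the retain, and the
/// whole set is deleted together once the path counts balance.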
bool
ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
                                   &BBStates,
                                 MapVector<Value *, RRInfo> &Retains,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 Module *M) {
  bool AnyPairsCompletelyEliminated = false;
  RRInfo RetainsToMove;
  RRInfo ReleasesToMove;
  SmallVector<Instruction *, 4> NewRetains;
  SmallVector<Instruction *, 4> NewReleases;
  SmallVector<Instruction *, 8> DeadInsts;

  // Visit each retain.
  for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
       E = Retains.end(); I != E; ++I) {
    Value *V = I->first;
    if (!V) continue; // blotted

    Instruction *Retain = cast<Instruction>(V);

    DEBUG(dbgs() << "ObjCARCOpt::PerformCodePlacement: Visiting: " << *Retain
          << "\n");

    Value *Arg = GetObjCArg(Retain);

    // If the object being released is in static or stack storage, we know it's
    // not being managed by ObjC reference counting, so we can delete pairs
    // regardless of what possible decrements or uses lie between them.
    bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);

    // A constant pointer can't be pointing to an object on the heap. It may
    // be reference-counted, but it won't be deleted.
    if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
      if (const GlobalVariable *GV =
            dyn_cast<GlobalVariable>(
              StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
        if (GV->isConstant())
          KnownSafe = true;

    // Connect the dots between the top-down-collected RetainsToMove and
    // bottom-up-collected ReleasesToMove to form sets of related calls.
    NewRetains.push_back(Retain);
    bool PerformMoveCalls =
      ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
                            NewReleases, DeadInsts, RetainsToMove,
                            ReleasesToMove, Arg, KnownSafe,
                            AnyPairsCompletelyEliminated);

    if (PerformMoveCalls) {
      // Ok, everything checks out and we're all set. Let's move/delete some
      // code!
      MoveCalls(Arg, RetainsToMove, ReleasesToMove,
                Retains, Releases, DeadInsts, M);
    }

    // Clean up state for next retain.
    NewReleases.clear();
    NewRetains.clear();
    RetainsToMove.clear();
    ReleasesToMove.clear();
  }

  // Now that we're done moving everything, we can delete the newly dead
  // instructions, as we no longer need them as insert points.
  while (!DeadInsts.empty())
    EraseInstruction(DeadInsts.pop_back_val());

  return AnyPairsCompletelyEliminated;
}

/// Weak pointer optimizations.
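/// For example (hypothetical IR, not from the original source), a weak load
/// immediately after a weak store to the same pointer can be forwarded:
///
///   call i8* @objc_storeWeak(i8** %p, i8* %v)
///   %tmp = call i8* @objc_loadWeak(i8** %p)
///
/// becomes a direct use of %v once provenance analysis proves the two
/// pointer operands MustAlias. Likewise, a weak variable that lives in an
/// unescaped alloca and is only ever initialized, stored, and destroyed can
/// be deleted outright.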
void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
  // First, do memdep-style RLE (redundant load elimination) and S2L
  // (store-to-load forwarding) optimizations. We can't use memdep itself
  // because it uses AliasAnalysis and we need to do provenance queries
  // instead.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    DEBUG(dbgs() << "ObjCARCOpt::OptimizeWeakCalls: Visiting: " << *Inst <<
          "\n");

    InstructionClass Class = GetBasicInstructionClass(Inst);
    if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
      continue;

    // Delete objc_loadWeak calls with no users.
    if (Class == IC_LoadWeak && Inst->use_empty()) {
      Inst->eraseFromParent();
      continue;
    }

    // TODO: For now, just look for an earlier available version of this value
    // within the same block. Theoretically, we could do memdep-style non-local
    // analysis too, but that would want caching. A better approach would be to
    // use the technique that EarlyCSE uses.
    inst_iterator Current = llvm::prior(I);
    BasicBlock *CurrentBB = Current.getBasicBlockIterator();
    for (BasicBlock::iterator B = CurrentBB->begin(),
                              J = Current.getInstructionIterator();
         J != B; --J) {
      Instruction *EarlierInst = &*llvm::prior(J);
      InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
      switch (EarlierClass) {
      case IC_LoadWeak:
      case IC_LoadWeakRetained: {
        // If this is loading from the same pointer, replace this load's value
        // with that load's value.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasAnalysis::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == IC_LoadWeakRetained) {
            CallInst *CI =
              CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
                               "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall);
          Call->eraseFromParent();
          goto clobbered;
        case AliasAnalysis::MayAlias:
        case AliasAnalysis::PartialAlias:
          goto clobbered;
        case AliasAnalysis::NoAlias:
          break;
        }
        break;
      }
      case IC_StoreWeak:
      case IC_InitWeak: {
        // If this is storing to the same pointer and has the same size etc.
        // replace this load's value with the stored value.
        CallInst *Call = cast<CallInst>(Inst);
        CallInst *EarlierCall = cast<CallInst>(EarlierInst);
        Value *Arg = Call->getArgOperand(0);
        Value *EarlierArg = EarlierCall->getArgOperand(0);
        switch (PA.getAA()->alias(Arg, EarlierArg)) {
        case AliasAnalysis::MustAlias:
          Changed = true;
          // If the load has a builtin retain, insert a plain retain for it.
          if (Class == IC_LoadWeakRetained) {
            CallInst *CI =
              CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
                               "", Call);
            CI->setTailCall();
          }
          // Zap the fully redundant load.
          Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
          Call->eraseFromParent();
          goto clobbered;
        case AliasAnalysis::MayAlias:
        case AliasAnalysis::PartialAlias:
          goto clobbered;
        case AliasAnalysis::NoAlias:
          break;
        }
        break;
      }
      case IC_MoveWeak:
      case IC_CopyWeak:
        // TODO: Grab the copied value.
        goto clobbered;
      case IC_AutoreleasepoolPush:
      case IC_None:
      case IC_User:
        // Weak pointers are only modified through the weak entry points
        // (and arbitrary calls, which could call the weak entry points).
        break;
      default:
        // Anything else could modify the weak pointer.
        goto clobbered;
      }
    }
  clobbered:;
  }

  // Then, for each destroyWeak with an alloca operand, check to see if
  // the alloca and all its users can be zapped.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;
    InstructionClass Class = GetBasicInstructionClass(Inst);
    if (Class != IC_DestroyWeak)
      continue;

    CallInst *Call = cast<CallInst>(Inst);
    Value *Arg = Call->getArgOperand(0);
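    // As a hypothetical example (not from the original source), this zaps
    // patterns like:
    //
    //   %w = alloca i8*
    //   call i8* @objc_initWeak(i8** %w, i8* %v)
    //   ...
    //   call void @objc_destroyWeak(i8** %w)
    //
    // where the alloca never escapes and is only touched by the weak entry
    // points: initWeak/storeWeak results are replaced with their second
    // argument, and every call plus the alloca itself is erased.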
    if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
      for (Value::use_iterator UI = Alloca->use_begin(),
           UE = Alloca->use_end(); UI != UE; ++UI) {
        const Instruction *UserInst = cast<Instruction>(*UI);
        switch (GetBasicInstructionClass(UserInst)) {
        case IC_InitWeak:
        case IC_StoreWeak:
        case IC_DestroyWeak:
          continue;
        default:
          goto done;
        }
      }
      Changed = true;
      for (Value::use_iterator UI = Alloca->use_begin(),
           UE = Alloca->use_end(); UI != UE; ) {
        CallInst *UserInst = cast<CallInst>(*UI++);
        switch (GetBasicInstructionClass(UserInst)) {
        case IC_InitWeak:
        case IC_StoreWeak:
          // These functions return their second argument.
          UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
          break;
        case IC_DestroyWeak:
          // No return value.
          break;
        default:
          llvm_unreachable("alloca really is used!");
        }
        UserInst->eraseFromParent();
      }
      Alloca->eraseFromParent();
    done:;
    }
  }

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeWeakCalls: Finished List.\n\n");
}

/// Identify program paths which execute sequences of retains and releases
/// which can be eliminated.
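/// For example (hypothetical IR, not from the original source):
///
///   %0 = tail call i8* @objc_retain(i8* %x)
///   ; ...nothing here can decrement x's reference count...
///   call void @objc_release(i8* %x)
///
/// The two calls cancel out along every path and can be removed.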
bool ObjCARCOpt::OptimizeSequences(Function &F) {
  /// Releases, Retains - These are used to store the results of the main flow
  /// analysis. These use Value* as the key instead of Instruction* so that the
  /// map stays valid when we get around to rewriting code and calls get
  /// replaced by arguments.
  DenseMap<Value *, RRInfo> Releases;
  MapVector<Value *, RRInfo> Retains;

  /// This is used during the traversal of the function to track the
  /// states for each identified object at each block.
  DenseMap<const BasicBlock *, BBState> BBStates;

  // Analyze the CFG of the function, and all instructions.
  bool NestingDetected = Visit(F, BBStates, Retains, Releases);

  // Transform.
  return PerformCodePlacement(BBStates, Retains, Releases, F.getParent()) &&
         NestingDetected;
}

/// Look for this pattern:
/// \code
///    %call = call i8* @something(...)
///    %2 = call i8* @objc_retain(i8* %call)
///    %3 = call i8* @objc_autorelease(i8* %2)
///    ret i8* %3
/// \endcode
/// And delete the retain and autorelease.
///
/// Otherwise if it's just this:
/// \code
///    %3 = call i8* @objc_autorelease(i8* %2)
///    ret i8* %3
/// \endcode
/// convert the autorelease to autoreleaseRV.
void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;

  SmallPtrSet<Instruction *, 4> DependingInstructions;
  SmallPtrSet<const BasicBlock *, 4> Visited;
  for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
    BasicBlock *BB = FI;
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
    if (!Ret)
      continue;

    DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Visiting: " << *Ret << "\n");

    const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
    FindDependencies(NeedsPositiveRetainCount, Arg,
                     BB, Ret, DependingInstructions, Visited, PA);
    if (DependingInstructions.size() != 1)
      goto next_block;

    {
      CallInst *Autorelease =
        dyn_cast_or_null<CallInst>(*DependingInstructions.begin());
      if (!Autorelease)
        goto next_block;
      InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
      if (!IsAutorelease(AutoreleaseClass))
        goto next_block;
      if (GetObjCArg(Autorelease) != Arg)
        goto next_block;

      DependingInstructions.clear();
      Visited.clear();

      // Check that there is nothing that can affect the reference
      // count between the autorelease and the retain.
      FindDependencies(CanChangeRetainCount, Arg,
                       BB, Autorelease, DependingInstructions, Visited, PA);
      if (DependingInstructions.size() != 1)
        goto next_block;

      {
        CallInst *Retain =
          dyn_cast_or_null<CallInst>(*DependingInstructions.begin());

        // Check that we found a retain with the same argument.
        if (!Retain ||
            !IsRetain(GetBasicInstructionClass(Retain)) ||
            GetObjCArg(Retain) != Arg)
          goto next_block;

        DependingInstructions.clear();
        Visited.clear();

        // Convert the autorelease to an autoreleaseRV, since it's
        // returning the value.
        if (AutoreleaseClass == IC_Autorelease) {
          DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Converting autorelease "
                          "=> autoreleaseRV since it's returning a value.\n"
                          "                             In: " << *Autorelease
                       << "\n");
          Autorelease->setCalledFunction(getAutoreleaseRVCallee(F.getParent()));
          DEBUG(dbgs() << "                             Out: " << *Autorelease
                       << "\n");
          Autorelease->setTailCall(); // Always tail call autoreleaseRV.
          AutoreleaseClass = IC_AutoreleaseRV;
        }

        // Check that there is nothing that can affect the reference
        // count between the retain and the call.
        // Note that Retain need not be in BB.
        FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
                         DependingInstructions, Visited, PA);
        if (DependingInstructions.size() != 1)
          goto next_block;

        {
          CallInst *Call =
            dyn_cast_or_null<CallInst>(*DependingInstructions.begin());

          // Check that the pointer is the return value of the call.
          if (!Call || Arg != Call)
            goto next_block;

          // Check that the call is a regular call.
          InstructionClass Class = GetBasicInstructionClass(Call);
          if (Class != IC_CallOrUser && Class != IC_Call)
            goto next_block;

          // If so, we can zap the retain and autorelease.
          Changed = true;
          ++NumRets;
          DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Erasing: " << *Retain
                       << "\n                             Erasing: "
                       << *Autorelease << "\n");
          EraseInstruction(Retain);
          EraseInstruction(Autorelease);
        }
      }
    }

  next_block:
    DependingInstructions.clear();
    Visited.clear();
  }

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Finished List.\n\n");
}

bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;

  // Identify the imprecise release metadata kind.
  ImpreciseReleaseMDKind =
    M.getContext().getMDKindID("clang.imprecise_release");
  CopyOnEscapeMDKind =
    M.getContext().getMDKindID("clang.arc.copy_on_escape");
  NoObjCARCExceptionsMDKind =
    M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
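
  // For example (hypothetical IR, not from the original source), an imprecise
  // release shows up as metadata attached to the call:
  //
  //   call void @objc_release(i8* %x), !clang.imprecise_release !0
  //
  // getMDKindID only registers the kind's name; the tags themselves are
  // attached by the frontend or by MoveCalls above.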

  // Intuitively, objc_retain and others are nocapture, however in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.

  // These are initialized lazily.
  RetainRVCallee = 0;
  AutoreleaseRVCallee = 0;
  ReleaseCallee = 0;
  RetainCallee = 0;
  RetainBlockCallee = 0;
  AutoreleaseCallee = 0;

  return false;
}

bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;

  DEBUG(dbgs() << "ObjCARCOpt: Visiting Function: " << F.getName() << "\n");

  PA.setAA(&getAnalysis<AliasAnalysis>());

  // This pass performs several distinct transformations. As a compile-time
  // aid when compiling code that isn't ObjC, skip these if the relevant ObjC
  // library functions aren't declared.

  // Preliminary optimizations. This also computes UsedInThisFunction.
  OptimizeIndividualCalls(F);
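
  // UsedInThisFunction is a bitmask indexed by InstructionClass: for example,
  // if the function contains an objc_retain call, the (1 << IC_Retain) bit is
  // set. The checks below use it to skip entire phases when the functions
  // they care about are never called.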

  // Optimizations for weak pointers.
  if (UsedInThisFunction & ((1 << IC_LoadWeak) |
                            (1 << IC_LoadWeakRetained) |
                            (1 << IC_StoreWeak) |
                            (1 << IC_InitWeak) |
                            (1 << IC_CopyWeak) |
                            (1 << IC_MoveWeak) |
                            (1 << IC_DestroyWeak)))
    OptimizeWeakCalls(F);

  // Optimizations for retain+release pairs.
  if (UsedInThisFunction & ((1 << IC_Retain) |
                            (1 << IC_RetainRV) |
                            (1 << IC_RetainBlock)))
    if (UsedInThisFunction & (1 << IC_Release))
      // Run OptimizeSequences until it either stops making changes or
      // no retain+release pair nesting is detected.
      while (OptimizeSequences(F)) {}

  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << IC_Autorelease) |
                            (1 << IC_AutoreleaseRV)))
    OptimizeReturns(F);

  DEBUG(dbgs() << "\n");

  return Changed;
}

void ObjCARCOpt::releaseMemory() {