//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
// The LLVM Compiler Infrastructure
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//===----------------------------------------------------------------------===//
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for
/// objects in Objective-C.
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc-opts"
#include "ARCRuntimeEntryPoints.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm::objcarc;
/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
/// \brief An associative container with fast insertion-order (deterministic)
/// iteration over its elements. Plus the special blot operation.
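///
/// A usage sketch (illustrative only; the key and value types here are
/// hypothetical, not taken from this file):
///
/// \code
///   BlotMapVector<const Value *, unsigned> MV;
///   MV[Ptr] = 1;   // Inserted at the end; iteration visits keys in
///                  // insertion order.
///   MV.blot(Ptr);  // The key is zeroed in place, so iterators stay valid,
///                  // but iteration must skip default-constructed keys.
/// \endcode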
template<class KeyT, class ValueT>
/// Map keys to indices in Vector.
typedef DenseMap<KeyT, size_t> MapTy;
typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
typedef typename VectorTy::iterator iterator;
typedef typename VectorTy::const_iterator const_iterator;
iterator begin() { return Vector.begin(); }
iterator end() { return Vector.end(); }
const_iterator begin() const { return Vector.begin(); }
const_iterator end() const { return Vector.end(); }
assert(Vector.size() >= Map.size()); // May differ due to blotting.
for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
assert(I->second < Vector.size());
assert(Vector[I->second].first == I->first);
for (typename VectorTy::const_iterator I = Vector.begin(),
E = Vector.end(); I != E; ++I)
(Map.count(I->first) &&
Map[I->first] == size_t(I - Vector.begin())));
ValueT &operator[](const KeyT &Arg) {
std::pair<typename MapTy::iterator, bool> Pair =
Map.insert(std::make_pair(Arg, size_t(0)));
size_t Num = Vector.size();
Pair.first->second = Num;
Vector.push_back(std::make_pair(Arg, ValueT()));
return Vector[Num].second;
return Vector[Pair.first->second].second;
std::pair<iterator, bool>
insert(const std::pair<KeyT, ValueT> &InsertPair) {
std::pair<typename MapTy::iterator, bool> Pair =
Map.insert(std::make_pair(InsertPair.first, size_t(0)));
size_t Num = Vector.size();
Pair.first->second = Num;
Vector.push_back(InsertPair);
return std::make_pair(Vector.begin() + Num, true);
return std::make_pair(Vector.begin() + Pair.first->second, false);
iterator find(const KeyT &Key) {
typename MapTy::iterator It = Map.find(Key);
if (It == Map.end()) return Vector.end();
return Vector.begin() + It->second;
const_iterator find(const KeyT &Key) const {
typename MapTy::const_iterator It = Map.find(Key);
if (It == Map.end()) return Vector.end();
return Vector.begin() + It->second;
/// This is similar to erase, but instead of removing the element from the
/// vector, it just zeros out the key in the vector. This leaves iterators
/// intact, but clients must be prepared for zeroed-out keys when iterating.
void blot(const KeyT &Key) {
typename MapTy::iterator It = Map.find(Key);
if (It == Map.end()) return;
Vector[It->second].first = KeyT();
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// \brief This is similar to StripPointerCastsAndObjCCalls but it stops as soon
/// as it finds a value with multiple uses.
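///
/// For example (a sketch): if %y = bitcast %T* %obj to i8* has a single use
/// and %obj is the result of an ObjC allocation, the search looks through the
/// bitcast and returns the identified object %obj.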
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
if (Arg->hasOneUse()) {
if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
return FindSingleUseIdentifiedObject(BC->getOperand(0));
if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
if (GEP->hasAllZeroIndices())
return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
if (IsForwarding(GetBasicInstructionClass(Arg)))
return FindSingleUseIdentifiedObject(
cast<CallInst>(Arg)->getArgOperand(0));
if (!IsObjCIdentifiedObject(Arg))
// If we found an identifiable object with multiple uses, but all of those
// uses are trivial, we can still consider this to be a single-use value.
if (IsObjCIdentifiedObject(Arg)) {
for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
/// This is a wrapper around getUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
SmallPtrSet<const Value *, 4> Visited;
SmallVector<const Value *, 4> Worklist;
Worklist.push_back(V);
const Value *P = Worklist.pop_back_val();
P = GetUnderlyingObjCPtr(P);
if (isa<AllocaInst>(P))
if (!Visited.insert(P))
if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
Worklist.push_back(SI->getTrueValue());
Worklist.push_back(SI->getFalseValue());
if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
Worklist.push_back(PN->getIncomingValue(i));
} while (!Worklist.empty());
/// \defgroup ARCOpt ARC Optimization.
// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.
// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges.
// TODO: OptimizeSequences could be generalized to be Interprocedural.
// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.
// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.
// TODO: The pointer returned from objc_loadWeakRetained is retained.
// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets, "Number of return value forwarding "
"retain+autoreleases eliminated");
STATISTIC(NumRRs, "Number of retain+release paths eliminated");
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
STATISTIC(NumRetainsBeforeOpt,
"Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
"Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
"Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
"Number of releases after optimization");
/// \brief A sequence of states that a pointer may go through in which an
/// objc_retain and objc_release are actually needed.
S_Retain, ///< objc_retain(x).
S_CanRelease, ///< foo(x) -- x could possibly see a ref count decrement.
S_Use, ///< any use of x.
S_Stop, ///< like S_Release, but code motion is stopped.
S_Release, ///< objc_release(x).
S_MovableRelease ///< objc_release(x), !clang.imprecise_release.
raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
return OS << "S_None";
return OS << "S_Retain";
return OS << "S_CanRelease";
return OS << "S_Use";
return OS << "S_Release";
case S_MovableRelease:
return OS << "S_MovableRelease";
return OS << "S_Stop";
llvm_unreachable("Unknown sequence type.");
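// As an illustrative sketch (not an exhaustive trace), a bottom-up walk of
//
//   objc_retain(%x)    ; walk ends here, completing the sequence
//   use(%x)            ; S_Use
//   maybe_release(%x)  ; S_CanRelease
//   objc_release(%x)   ; walk starts here: S_Release (or S_MovableRelease
//                      ;   when marked !clang.imprecise_release)
//
// advances the state for %x through the states above; top-down walks move
// through the mirror-image order.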
static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
if (A == S_None || B == S_None)
if (A > B) std::swap(A, B);
// Choose the side which is further along in the sequence.
if ((A == S_Retain || A == S_CanRelease) &&
(B == S_CanRelease || B == S_Use))
// Choose the side which is further along in the sequence.
if ((A == S_Use || A == S_CanRelease) &&
(B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
// If both sides are releases, choose the more conservative one.
if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
if (A == S_Release && B == S_MovableRelease)
/// \brief Unidirectional information about either a
/// retain-decrement-use-release sequence or release-use-decrement-retain
/// reverse sequence.
/// After an objc_retain, the reference count of the referenced
/// object is known to be positive. Similarly, before an objc_release, the
/// reference count of the referenced object is known to be positive. If
/// there are retain-release pairs in code regions where the retain count
/// is known to be positive, they can be eliminated, regardless of any side
/// effects between them.
/// Also, a retain+release pair nested within another retain+release
/// pair all on the same known pointer value can be eliminated, regardless
/// of any intervening side effects.
/// KnownSafe is true when either of these conditions is satisfied.
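///
/// For example (a sketch):
///
/// \code
///   objc_retain(%x)
///     objc_retain(%x)
///     ...              // arbitrary side effects
///     objc_release(%x)
///   objc_release(%x)
/// \endcode
///
/// The inner pair runs while the outer retain keeps the count positive, so
/// the inner pair can be removed regardless of the intervening effects.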
/// True if the objc_release calls are all marked with the "tail" keyword.
bool IsTailCallRelease;
/// If the Calls are objc_release calls and they all have a
/// clang.imprecise_release tag, this is the metadata tag.
MDNode *ReleaseMetadata;
/// For a top-down sequence, the set of objc_retains or
/// objc_retainBlocks. For bottom-up, the set of objc_releases.
SmallPtrSet<Instruction *, 2> Calls;
/// The set of optimal insert positions for moving calls in the opposite
/// sequence direction.
SmallPtrSet<Instruction *, 2> ReverseInsertPts;
/// If this is true, we cannot perform code motion but can still remove
/// retain/release pairs.
bool CFGHazardAfflicted;
KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0),
CFGHazardAfflicted(false) {}
/// Conservatively merge the two RRInfo. Returns true if a partial merge has
/// occurred, false otherwise.
bool Merge(const RRInfo &Other);
void RRInfo::clear() {
IsTailCallRelease = false;
ReverseInsertPts.clear();
CFGHazardAfflicted = false;
bool RRInfo::Merge(const RRInfo &Other) {
// Conservatively merge the ReleaseMetadata information.
if (ReleaseMetadata != Other.ReleaseMetadata)
// Conservatively merge the boolean state.
KnownSafe &= Other.KnownSafe;
IsTailCallRelease &= Other.IsTailCallRelease;
CFGHazardAfflicted |= Other.CFGHazardAfflicted;
// Merge the call sets.
Calls.insert(Other.Calls.begin(), Other.Calls.end());
// Merge the insert point sets. If there are any differences,
// that makes this a partial merge.
bool Partial = ReverseInsertPts.size() != Other.ReverseInsertPts.size();
for (SmallPtrSet<Instruction *, 2>::const_iterator
I = Other.ReverseInsertPts.begin(),
E = Other.ReverseInsertPts.end(); I != E; ++I)
Partial |= ReverseInsertPts.insert(*I);
/// \brief This class summarizes several per-pointer runtime properties which
/// are propagated through the flow graph.
/// True if the reference count is known to be incremented.
bool KnownPositiveRefCount;
/// True if we've seen an opportunity for partial RR elimination, such as
/// pushing calls into a CFG triangle or into one side of a CFG diamond.
/// The current position in the sequence.
unsigned char Seq : 8;
/// Unidirectional information about the current sequence.
PtrState() : KnownPositiveRefCount(false), Partial(false),
bool IsKnownSafe() const {
return RRI.KnownSafe;
void SetKnownSafe(const bool NewValue) {
RRI.KnownSafe = NewValue;
bool IsTailCallRelease() const {
return RRI.IsTailCallRelease;
void SetTailCallRelease(const bool NewValue) {
RRI.IsTailCallRelease = NewValue;
bool IsTrackingImpreciseReleases() const {
return RRI.ReleaseMetadata != 0;
const MDNode *GetReleaseMetadata() const {
return RRI.ReleaseMetadata;
void SetReleaseMetadata(MDNode *NewValue) {
RRI.ReleaseMetadata = NewValue;
bool IsCFGHazardAfflicted() const {
return RRI.CFGHazardAfflicted;
void SetCFGHazardAfflicted(const bool NewValue) {
RRI.CFGHazardAfflicted = NewValue;
void SetKnownPositiveRefCount() {
DEBUG(dbgs() << "Setting Known Positive.\n");
KnownPositiveRefCount = true;
void ClearKnownPositiveRefCount() {
DEBUG(dbgs() << "Clearing Known Positive.\n");
KnownPositiveRefCount = false;
bool HasKnownPositiveRefCount() const {
return KnownPositiveRefCount;
void SetSeq(Sequence NewSeq) {
DEBUG(dbgs() << "Old: " << Seq << "; New: " << NewSeq << "\n");
Sequence GetSeq() const {
return static_cast<Sequence>(Seq);
void ClearSequenceProgress() {
ResetSequenceProgress(S_None);
void ResetSequenceProgress(Sequence NewSeq) {
DEBUG(dbgs() << "Resetting sequence progress.\n");
void Merge(const PtrState &Other, bool TopDown);
void InsertCall(Instruction *I) {
void InsertReverseInsertPt(Instruction *I) {
RRI.ReverseInsertPts.insert(I);
void ClearReverseInsertPts() {
RRI.ReverseInsertPts.clear();
bool HasReverseInsertPts() const {
return !RRI.ReverseInsertPts.empty();
const RRInfo &GetRRInfo() const {
PtrState::Merge(const PtrState &Other, bool TopDown) {
Seq = MergeSeqs(GetSeq(), Other.GetSeq(), TopDown);
KnownPositiveRefCount &= Other.KnownPositiveRefCount;
// If we're not in a sequence (anymore), drop all associated state.
} else if (Partial || Other.Partial) {
// If we're doing a merge on a path that's previously seen a partial
// merge, conservatively drop the sequence, to avoid doing partial
// RR elimination. If the branch predicates for the two merges differ,
// mixing them is unsafe.
ClearSequenceProgress();
// Otherwise merge the other PtrState's RRInfo into our RRInfo. At this
// point, we know that currently we are not partial. Stash whether or not
// the merge operation caused us to undergo a partial merging of reverse
// insertion points.
Partial = RRI.Merge(Other.RRI);
/// \brief Per-BasicBlock state.
/// The number of unique control paths from the entry which can reach this
/// block.
unsigned TopDownPathCount;
/// The number of unique control paths to exits from this block.
unsigned BottomUpPathCount;
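// For example (a sketch): in a diamond CFG A -> {B, C} -> D, TopDownPathCount
// is 1 for A, B, and C and 2 for D, while BottomUpPathCount is 1 for D, B,
// and C and 2 for A; the number of entry-to-exit paths through B is then
// TopDownPathCount(B) * BottomUpPathCount(B) = 1.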
/// A type for PerPtrTopDown and PerPtrBottomUp.
typedef MapVector<const Value *, PtrState> MapTy;
/// The top-down traversal uses this to record information known about a
/// pointer at the bottom of each block.
/// The bottom-up traversal uses this to record information known about a
/// pointer at the top of each block.
MapTy PerPtrBottomUp;
/// Effective predecessors of the current block ignoring ignorable edges and
/// ignored backedges.
SmallVector<BasicBlock *, 2> Preds;
/// Effective successors of the current block ignoring ignorable edges and
/// ignored backedges.
SmallVector<BasicBlock *, 2> Succs;
static const unsigned OverflowOccurredValue;
BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }
typedef MapTy::iterator ptr_iterator;
typedef MapTy::const_iterator ptr_const_iterator;
ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
ptr_const_iterator top_down_ptr_begin() const {
return PerPtrTopDown.begin();
ptr_const_iterator top_down_ptr_end() const {
return PerPtrTopDown.end();
ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
ptr_const_iterator bottom_up_ptr_begin() const {
return PerPtrBottomUp.begin();
ptr_const_iterator bottom_up_ptr_end() const {
return PerPtrBottomUp.end();
/// Mark this block as being an entry block, which has one path from the
/// entry by definition.
void SetAsEntry() { TopDownPathCount = 1; }
/// Mark this block as being an exit block, which has one path to an exit by
/// definition.
void SetAsExit() { BottomUpPathCount = 1; }
/// Attempt to find the PtrState object describing the top down state for
/// pointer Arg. Return a new initialized PtrState describing the top down
/// state for Arg if we do not find one.
PtrState &getPtrTopDownState(const Value *Arg) {
return PerPtrTopDown[Arg];
/// Attempt to find the PtrState object describing the bottom up state for
/// pointer Arg. Return a new initialized PtrState describing the bottom up
/// state for Arg if we do not find one.
PtrState &getPtrBottomUpState(const Value *Arg) {
return PerPtrBottomUp[Arg];
/// Attempt to find the PtrState object describing the bottom up state for
/// pointer Arg.
ptr_iterator findPtrBottomUpState(const Value *Arg) {
return PerPtrBottomUp.find(Arg);
void clearBottomUpPointers() {
PerPtrBottomUp.clear();
void clearTopDownPointers() {
PerPtrTopDown.clear();
void InitFromPred(const BBState &Other);
void InitFromSucc(const BBState &Other);
void MergePred(const BBState &Other);
void MergeSucc(const BBState &Other);
/// Compute the number of possible unique paths from an entry to an exit
/// which pass through this block. This is only valid after both the
/// top-down and bottom-up traversals are complete.
/// Returns true if overflow occurred. Returns false if overflow did not
/// occur.
bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
if (TopDownPathCount == OverflowOccurredValue ||
BottomUpPathCount == OverflowOccurredValue)
unsigned long long Product =
(unsigned long long)TopDownPathCount*BottomUpPathCount;
// Overflow occurred if any of the upper bits of Product are set or if
// the lower 32 bits of Product are all set.
return (Product >> 32) ||
((PathCount = Product) == OverflowOccurredValue);
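// For example (a sketch): with TopDownPathCount == 0x10000 and
// BottomUpPathCount == 0x10000, Product is 1ULL << 32, so (Product >> 32) is
// nonzero and overflow is reported even though the low 32 bits are zero.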
// Specialized CFG utilities.
typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
edge_iterator pred_begin() const { return Preds.begin(); }
edge_iterator pred_end() const { return Preds.end(); }
edge_iterator succ_begin() const { return Succs.begin(); }
edge_iterator succ_end() const { return Succs.end(); }
void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }
bool isExit() const { return Succs.empty(); }
const unsigned BBState::OverflowOccurredValue = 0xffffffff;
void BBState::InitFromPred(const BBState &Other) {
PerPtrTopDown = Other.PerPtrTopDown;
TopDownPathCount = Other.TopDownPathCount;
void BBState::InitFromSucc(const BBState &Other) {
PerPtrBottomUp = Other.PerPtrBottomUp;
BottomUpPathCount = Other.BottomUpPathCount;
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
if (TopDownPathCount == OverflowOccurredValue)
// Other.TopDownPathCount can be 0, in which case it is either dead or a
// loop backedge. Loop backedges are special.
TopDownPathCount += Other.TopDownPathCount;
// In order to be consistent, we clear the top down pointers when, by this
// addition, TopDownPathCount becomes OverflowOccurredValue even though
// "true" overflow has not occurred.
if (TopDownPathCount == OverflowOccurredValue) {
clearTopDownPointers();
// Check for overflow. If we have overflow, fall back to conservative
// behavior.
if (TopDownPathCount < Other.TopDownPathCount) {
TopDownPathCount = OverflowOccurredValue;
clearTopDownPointers();
// For each entry in the other set, if our set has an entry with the same key,
// merge the entries. Otherwise, copy the entry and merge it with an empty
// entry.
for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
// For each entry in our set, if the other set doesn't have an entry with the
// same key, force it to merge with an empty entry.
for (ptr_iterator MI = top_down_ptr_begin(),
ME = top_down_ptr_end(); MI != ME; ++MI)
if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
MI->second.Merge(PtrState(), /*TopDown=*/true);
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
if (BottomUpPathCount == OverflowOccurredValue)
// Other.BottomUpPathCount can be 0, in which case it is either dead or a
// loop backedge. Loop backedges are special.
BottomUpPathCount += Other.BottomUpPathCount;
// In order to be consistent, we clear the bottom up pointers when, by this
// addition, BottomUpPathCount becomes OverflowOccurredValue even though
// "true" overflow has not occurred.
if (BottomUpPathCount == OverflowOccurredValue) {
clearBottomUpPointers();
// Check for overflow. If we have overflow, fall back to conservative
// behavior.
if (BottomUpPathCount < Other.BottomUpPathCount) {
BottomUpPathCount = OverflowOccurredValue;
clearBottomUpPointers();
// For each entry in the other set, if our set has an entry with the
// same key, merge the entries. Otherwise, copy the entry and merge
// it with an empty entry.
for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
// For each entry in our set, if the other set doesn't have an entry
// with the same key, force it to merge with an empty entry.
for (ptr_iterator MI = bottom_up_ptr_begin(),
ME = bottom_up_ptr_end(); MI != ME; ++MI)
if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
MI->second.Merge(PtrState(), /*TopDown=*/false);
// Only enable ARC Annotations if we are building a debug version of
// the compiler.
#define ARC_ANNOTATIONS
// Define some macros along the lines of DEBUG and some helper functions to
// make it cleaner to create annotations in the source code and to no-op when
// not building in debug mode.
#ifdef ARC_ANNOTATIONS
#include "llvm/Support/CommandLine.h"
/// Enable/disable ARC sequence annotations.
EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
cl::desc("Enable emission of arc data flow analysis "
DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false),
cl::desc("Disable check for cfg hazards when "
static cl::opt<std::string>
ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
cl::desc("filter out all data flow annotations "
"but those that apply to the given "
"target llvm identifier."));
/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
/// instruction so that we can track backwards when post processing via the
/// llvm arc annotation processor tool.
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
// If pointer is a result of an instruction and it does not have a source
// MDNode attached to it, attach a new MDNode onto it. If pointer is a result
// of an instruction and does have a source MDNode attached to it, return a
// reference to said Node. Otherwise just return 0.
if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
if (!(Node = Inst->getMetadata(NodeId))) {
// We do not have any node. Generate and attach the hash MDString to the
// instruction.
// We just use an MDString to ensure that this metadata gets written out
// of line at the module level and to provide a very simple format
// encoding the information herein. Both of these make it simpler to
// parse the annotations by a simple external program.
raw_string_ostream os(Str);
os << "(" << Inst->getParent()->getParent()->getName() << ",%"
<< Inst->getName() << ")";
Hash = MDString::get(Inst->getContext(), os.str());
Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(), Hash));
// We have a node. Grab its hash and return it.
assert(Node->getNumOperands() == 1 &&
"An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
Hash = cast<MDString>(Node->getOperand(0));
} else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
raw_string_ostream os(str);
os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
Hash = MDString::get(Arg->getContext(), os.str());
static std::string SequenceToString(Sequence A) {
raw_string_ostream os(str);
/// Helper function to change a Sequence into a String object using our
/// overload for raw_ostream so we only have printing code in one location.
static MDString *SequenceToMDString(LLVMContext &Context,
return MDString::get(Context, SequenceToString(A));
/// A simple function to generate an MDNode which describes the change in state
/// for Value *Ptr caused by Instruction *Inst.
static void AppendMDNodeToInstForPtr(unsigned NodeId,
MDString *PtrSourceMDNodeID,
Value *tmp[3] = {PtrSourceMDNodeID,
SequenceToMDString(Inst->getContext(),
SequenceToMDString(Inst->getContext(),
Node = MDNode::get(Inst->getContext(),
ArrayRef<Value*>(tmp, 3));
Inst->setMetadata(NodeId, Node);
/// Add to the beginning of the basic block llvm.ptr.annotations which show the
/// state of a pointer at the entrance to a basic block.
static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
Value *Ptr, Sequence Seq) {
// If we have a target identifier, make sure that we match it before
// emitting an annotation.
if (!ARCAnnotationTargetIdentifier.empty() &&
!Ptr->getName().equals(ARCAnnotationTargetIdentifier))
Module *M = BB->getParent()->getParent();
LLVMContext &C = M->getContext();
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
Type *I8XX = PointerType::getUnqual(I8X);
Type *Params[] = {I8XX, I8XX};
FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
ArrayRef<Type*>(Params, 2),
Constant *Callee = M->getOrInsertFunction(Name, FTy);
IRBuilder<> Builder(BB, BB->getFirstInsertionPt());
StringRef Tmp = Ptr->getName();
if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
cast<Constant>(ActualPtrName), Tmp);
std::string SeqStr = SequenceToString(Seq);
if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
cast<Constant>(ActualPtrName), SeqStr);
Builder.CreateCall2(Callee, PtrName, S);
/// Add to the end of the basic block llvm.ptr.annotations which show the state
/// of the pointer at the bottom of the basic block.
static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
Value *Ptr, Sequence Seq) {
// If we have a target identifier, make sure that we match it before emitting
// an annotation.
if (!ARCAnnotationTargetIdentifier.empty() &&
!Ptr->getName().equals(ARCAnnotationTargetIdentifier))
Module *M = BB->getParent()->getParent();
LLVMContext &C = M->getContext();
Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
Type *I8XX = PointerType::getUnqual(I8X);
Type *Params[] = {I8XX, I8XX};
FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
ArrayRef<Type*>(Params, 2),
Constant *Callee = M->getOrInsertFunction(Name, FTy);
IRBuilder<> Builder(BB, std::prev(BB->end()));
StringRef Tmp = Ptr->getName();
if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
cast<Constant>(ActualPtrName), Tmp);
std::string SeqStr = SequenceToString(Seq);
if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
cast<Constant>(ActualPtrName), SeqStr);
Builder.CreateCall2(Callee, PtrName, S);
/// Adds a source annotation to pointer and a state change annotation to Inst
/// referencing the source annotation and the old/new state of pointer.
static void GenerateARCAnnotation(unsigned InstMDId,
if (EnableARCAnnotations) {
// If we have a target identifier, make sure that we match it before
// emitting an annotation.
if (!ARCAnnotationTargetIdentifier.empty() &&
!Ptr->getName().equals(ARCAnnotationTargetIdentifier))
// First generate the source annotation on our pointer. This will return an
// MDString* if Ptr actually comes from an instruction implying we can put
// in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
// then we know that our pointer is from an Argument so we put a reference
// to the argument number.
// The point of this is to make it easy for the
// llvm-arc-annotation-processor tool to cross reference where the source
// pointer is in the LLVM IR, since the LLVM IR parser does not submit such
// information via debug info for backends to use (and why would anyone
// need such a thing from LLVM IR besides in non-standard cases like this).
MDString *SourcePtrMDNode =
AppendMDNodeToSourcePtr(PtrMDId, Ptr);
AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
// The actual interface for accessing the above functionality is defined via
// some simple macros which are defined below. We do this so that the user does
// not need to pass in the needed metadata id, which results in cleaner code,
// and because it provides an easy way to conditionally no-op all annotation
// support in a non-debug build.
/// Use this macro to annotate a sequence state change when processing
/// instructions bottom up.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new) \
GenerateARCAnnotation(ARCAnnotationBottomUpMDKind, \
ARCAnnotationProvenanceSourceMDKind, (inst), \
const_cast<Value*>(ptr), (old), (new))
/// Use this macro to annotate a sequence state change when processing
/// instructions top down.
#define ANNOTATE_TOPDOWN(inst, ptr, old, new) \
GenerateARCAnnotation(ARCAnnotationTopDownMDKind, \
ARCAnnotationProvenanceSourceMDKind, (inst), \
const_cast<Value*>(ptr), (old), (new))
#define ANNOTATE_BB(_states, _bb, _name, _type, _direction) \
if (EnableARCAnnotations) { \
for (BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(), \
E = (_states)._direction##_ptr_end(); I != E; ++I) { \
Value *Ptr = const_cast<Value*>(I->first); \
Sequence Seq = I->second.GetSeq(); \
GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq); \
#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock) \
ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart", \
Entrance, bottom_up)
#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock) \
ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend", \
Terminator, bottom_up)
#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock) \
ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart", \
Entrance, top_down)
#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock) \
ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend", \
Terminator, top_down)
#else // !ARC_ANNOTATION
// If annotations are off, noop.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)
#endif // !ARC_ANNOTATION
/// \brief The main ARC optimization pass.
class ObjCARCOpt : public FunctionPass {
ProvenanceAnalysis PA;
ARCRuntimeEntryPoints EP;
// This is used to track if a pointer is stored into an alloca.
DenseSet<const Value *> MultiOwnersSet;
/// A flag indicating whether this optimization pass should run.
/// Flags which determine whether each of the interesting runtime functions
/// is in fact used in the current function.
unsigned UsedInThisFunction;
/// The Metadata Kind for clang.imprecise_release metadata.
unsigned ImpreciseReleaseMDKind;
/// The Metadata Kind for clang.arc.copy_on_escape metadata.
unsigned CopyOnEscapeMDKind;
/// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
unsigned NoObjCARCExceptionsMDKind;
#ifdef ARC_ANNOTATIONS
/// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
unsigned ARCAnnotationBottomUpMDKind;
/// The Metadata Kind for llvm.arc.annotation.topdown metadata.
unsigned ARCAnnotationTopDownMDKind;
/// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
unsigned ARCAnnotationProvenanceSourceMDKind;
#endif // ARC_ANNOTATIONS
bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
InstructionClass &Class);
void OptimizeIndividualCalls(Function &F);
void CheckForCFGHazards(const BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
BBState &MyStates) const;
bool VisitInstructionBottomUp(Instruction *Inst,
MapVector<Value *, RRInfo> &Retains,
bool VisitBottomUp(BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
MapVector<Value *, RRInfo> &Retains);
bool VisitInstructionTopDown(Instruction *Inst,
DenseMap<Value *, RRInfo> &Releases,
bool VisitTopDown(BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
DenseMap<Value *, RRInfo> &Releases);
bool Visit(Function &F,
DenseMap<const BasicBlock *, BBState> &BBStates,
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases);
void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
SmallVectorImpl<Instruction *> &DeadInsts,
bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
SmallVectorImpl<Instruction *> &NewRetains,
SmallVectorImpl<Instruction *> &NewReleases,
SmallVectorImpl<Instruction *> &DeadInsts,
RRInfo &RetainsToMove,
RRInfo &ReleasesToMove,
bool &AnyPairsCompletelyEliminated);
bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
MapVector<Value *, RRInfo> &Retains,
DenseMap<Value *, RRInfo> &Releases,
void OptimizeWeakCalls(Function &F);
bool OptimizeSequences(Function &F);
void OptimizeReturns(Function &F);
void GatherStatistics(Function &F, bool AfterOptimization = false);
void getAnalysisUsage(AnalysisUsage &AU) const override;
bool doInitialization(Module &M) override;
bool runOnFunction(Function &F) override;
void releaseMemory() override;
ObjCARCOpt() : FunctionPass(ID) {
initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
"objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
"objc-arc", "ObjC ARC optimization", false, false)
Pass *llvm::createObjCARCOptPass() {
return new ObjCARCOpt();
void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
AU.addRequired<ObjCARCAliasAnalysis>();
AU.addRequired<AliasAnalysis>();
// ARC optimization doesn't currently split critical edges.
AU.setPreservesCFG();
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
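///
/// For example (a sketch in LLVM IR; %x and @g are hypothetical):
///
/// \code
///   %x = load i8** @g
///   %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
/// \endcode
///
/// becomes a call to @objc_retain because %x is not the result of an
/// immediately preceding call or invoke, while an adjacent
/// objc_autoreleaseReturnValue/objc_retainAutoreleasedReturnValue pair on the
/// same pointer is deleted outright.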
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
// Check for the argument being from an immediately preceding call or invoke.
const Value *Arg = GetObjCArg(RetainRV);
ImmutableCallSite CS(Arg);
if (const Instruction *Call = CS.getInstruction()) {
if (Call->getParent() == RetainRV->getParent()) {
BasicBlock::const_iterator I = Call;
while (IsNoopInstruction(I)) ++I;
if (&*I == RetainRV)
} else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
BasicBlock *RetainRVParent = RetainRV->getParent();
if (II->getNormalDest() == RetainRVParent) {
BasicBlock::const_iterator I = RetainRVParent->begin();
while (IsNoopInstruction(I)) ++I;
if (&*I == RetainRV)
// Check for being preceded by an objc_autoreleaseReturnValue on the same
// pointer. In this case, we can delete the pair.
BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
do --I; while (I != Begin && IsNoopInstruction(I));
if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
GetObjCArg(I) == Arg) {
DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
<< "Erasing " << *RetainRV << "\n");
EraseInstruction(I);
EraseInstruction(RetainRV);
// Turn it into a plain objc_retain.
DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
"objc_retain since the operand is not a return value.\n"
"Old = " << *RetainRV << "\n");
Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);
DEBUG(dbgs() << "New = " << *RetainRV << "\n");
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
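///
/// For example (a sketch in LLVM IR; %x and %slot are hypothetical):
///
/// \code
///   %0 = call i8* @objc_autoreleaseReturnValue(i8* %x)
///   store i8* %0, i8** %slot    ; %0 never reaches a ret
/// \endcode
///
/// is rewritten to call @objc_autorelease instead, since the ReturnValue form
/// only pays for itself when the value is actually returned.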
ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
InstructionClass &Class) {
// Check for a return of the pointer value.
const Value *Ptr = GetObjCArg(AutoreleaseRV);
SmallVector<const Value *, 2> Users;
Users.push_back(Ptr);
Ptr = Users.pop_back_val();
for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
const User *I = *UI;
if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
if (isa<BitCastInst>(I))
} while (!Users.empty());
DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
"objc_autorelease since its operand is not used as a return "
"Old = " << *AutoreleaseRV << "\n");
CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Autorelease);
AutoreleaseRVCI->setCalledFunction(NewDecl);
AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
Class = IC_Autorelease;
DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
// Reset all the flags in preparation for recomputing them.
UsedInThisFunction = 0;
// Visit all objc_* calls in F.
for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
Instruction *Inst = &*I++;
InstructionClass Class = GetBasicInstructionClass(Inst);
DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");
// Delete no-op casts. These function calls have special semantics, but
// the semantics are entirely implemented via lowering in the front-end,
// so by the time they reach the optimizer, they are just no-op calls
// which return their argument.
//
// There are gray areas here, as the ability to cast reference-counted
// pointers to raw void* and back allows code to break ARC assumptions,
// however these are currently considered to be unimportant.
DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
EraseInstruction(Inst);
// If the pointer-to-weak-pointer is null, it's undefined behavior.
case IC_LoadWeakRetained:
case IC_DestroyWeak: {
CallInst *CI = cast<CallInst>(Inst);
if (IsNullOrUndef(CI->getArgOperand(0))) {
Type *Ty = CI->getArgOperand(0)->getType();
new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
Constant::getNullValue(Ty),
llvm::Value *NewValue = UndefValue::get(CI->getType());
DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
"\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
CI->replaceAllUsesWith(NewValue);
CI->eraseFromParent();
CallInst *CI = cast<CallInst>(Inst);
if (IsNullOrUndef(CI->getArgOperand(0)) ||
IsNullOrUndef(CI->getArgOperand(1))) {
Type *Ty = CI->getArgOperand(0)->getType();
new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
Constant::getNullValue(Ty),
llvm::Value *NewValue = UndefValue::get(CI->getType());
DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
"\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
CI->replaceAllUsesWith(NewValue);
CI->eraseFromParent();
if (OptimizeRetainRVCall(F, Inst))
case IC_AutoreleaseRV:
OptimizeAutoreleaseRVCall(F, Inst, Class);
// objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
if (IsAutorelease(Class) && Inst->use_empty()) {
CallInst *Call = cast<CallInst>(Inst);
const Value *Arg = Call->getArgOperand(0);
Arg = FindSingleUseIdentifiedObject(Arg);
// Create the declaration lazily.
LLVMContext &C = Inst->getContext();
Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));
DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
"since x is otherwise unused.\nOld: " << *Call << "\nNew: "
<< *NewCall << "\n");
EraseInstruction(Call);
// For functions which can never be passed stack arguments, add
// a tail keyword.
if (IsAlwaysTail(Class)) {
DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
"passed stack args: " << *Inst << "\n");
cast<CallInst>(Inst)->setTailCall();
// Ensure that functions that can never have a "tail" keyword due to the
// semantics of ARC truly do not do so.
if (IsNeverTail(Class)) {
DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
cast<CallInst>(Inst)->setTailCall(false);
// Set nounwind as needed.
if (IsNoThrow(Class)) {
DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
cast<CallInst>(Inst)->setDoesNotThrow();
if (!IsNoopOnNull(Class)) {
UsedInThisFunction |= 1 << Class;
const Value *Arg = GetObjCArg(Inst);
// ARC calls with null are no-ops. Delete them.
if (IsNullOrUndef(Arg)) {
DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
EraseInstruction(Inst);
// Keep track of which of retain, release, autorelease, and retain_block
// are actually present in this function.
UsedInThisFunction |= 1 << Class;
// If Arg is a PHI, and one or more incoming values to the
// PHI are null, and the call is control-equivalent to the PHI, and there
// are no relevant side effects between the PHI and the call, the call
// could be pushed up to just those paths with non-null incoming values.
// For now, don't bother splitting critical edges for this.
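// For example (a sketch in LLVM IR; names are hypothetical):
//
//   %p = phi i8* [ null, %bb1 ], [ %x, %bb2 ]
//   call void @objc_release(i8* %p)
//
// Here the release can be cloned into %bb2 (the non-null predecessor) and the
// original call erased, since the call is a no-op along the null path.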
SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
Worklist.push_back(std::make_pair(Inst, Arg));
std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
const PHINode *PN = dyn_cast<PHINode>(Arg);
// Determine if the PHI has any null operands, or any incoming
// critical edges.
bool HasNull = false;
bool HasCriticalEdges = false;
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
if (IsNullOrUndef(Incoming))
else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
.getNumSuccessors() != 1) {
HasCriticalEdges = true;
// If we have null operands and no critical edges, optimize.
if (!HasCriticalEdges && HasNull) {
SmallPtrSet<Instruction *, 4> DependingInstructions;
SmallPtrSet<const BasicBlock *, 4> Visited;
// Check that there is nothing that cares about the reference
// count between the call and the phi.
case IC_RetainBlock:
// These can always be moved up.
// These can't be moved across things that care about the retain
// count.
FindDependencies(NeedsPositiveRetainCount, Arg,
Inst->getParent(), Inst,
DependingInstructions, Visited, PA);
case IC_Autorelease:
// These can't be moved across autorelease pool scope boundaries.
FindDependencies(AutoreleasePoolBoundary, Arg,
Inst->getParent(), Inst,
DependingInstructions, Visited, PA);
case IC_AutoreleaseRV:
// Don't move these; the RV optimization depends on the autoreleaseRV
// being tail called, and the retainRV being immediately after a call
// (which might still happen if we get lucky with codegen layout, but
// it's not worth taking the chance).
llvm_unreachable("Invalid dependence flavor");
if (DependingInstructions.size() == 1 &&
*DependingInstructions.begin() == PN) {
// Clone the call into each predecessor that has a non-null value.
CallInst *CInst = cast<CallInst>(Inst);
Type *ParamTy = CInst->getArgOperand(0)->getType();
for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
if (!IsNullOrUndef(Incoming)) {
CallInst *Clone = cast<CallInst>(CInst->clone());
Value *Op = PN->getIncomingValue(i);
Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
if (Op->getType() != ParamTy)
Op = new BitCastInst(Op, ParamTy, "", InsertPos);
Clone->setArgOperand(0, Op);
Clone->insertBefore(InsertPos);
DEBUG(dbgs() << "Cloning "
"And inserting clone at " << *InsertPos << "\n");
Worklist.push_back(std::make_pair(Clone, Incoming));
// Erase the original call.
DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
EraseInstruction(CInst);
} while (!Worklist.empty());
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
const bool SuccSRRIKnownSafe,
bool &SomeSuccHasSame,
bool &AllSuccsHaveSame,
bool &NotAllSeqEqualButKnownSafe,
bool &ShouldContinue) {
case S_CanRelease: {
if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
S.ClearSequenceProgress();
S.SetCFGHazardAfflicted(true);
ShouldContinue = true;
SomeSuccHasSame = true;
case S_MovableRelease:
if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
AllSuccsHaveSame = false;
NotAllSeqEqualButKnownSafe = true;
llvm_unreachable("bottom-up pointer in retain state!");
llvm_unreachable("This should have been handled earlier.");
/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
const bool SuccSRRIKnownSafe,
bool &SomeSuccHasSame,
bool &AllSuccsHaveSame,
bool &NotAllSeqEqualButKnownSafe) {
SomeSuccHasSame = true;
case S_MovableRelease:
if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
AllSuccsHaveSame = false;
NotAllSeqEqualButKnownSafe = true;
llvm_unreachable("bottom-up pointer in retain state!");
llvm_unreachable("This should have been handled earlier.");
/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
DenseMap<const BasicBlock *, BBState> &BBStates,
BBState &MyStates) const {
// If any top-down local-use or possible-dec has a succ which is earlier in
// the sequence, forget it.
for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
E = MyStates.top_down_ptr_end(); I != E; ++I) {
PtrState &S = I->second;
const Sequence Seq = I->second.GetSeq();
// We only care about S_Retain, S_CanRelease, and S_Use.
// Make sure that if extra top down states are added in the future that this
// code is updated to handle it.
assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
"Unknown top down sequence state.");
const Value *Arg = I->first;
const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
bool SomeSuccHasSame = false;
bool AllSuccsHaveSame = true;
bool NotAllSeqEqualButKnownSafe = false;
succ_const_iterator SI(TI), SE(TI, false);
for (; SI != SE; ++SI) {
// If VisitBottomUp has pointer information for this successor, take
// what we know about it.
const DenseMap<const BasicBlock *, BBState>::iterator BBI =
assert(BBI != BBStates.end());
const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
const Sequence SuccSSeq = SuccS.GetSeq();
// If the bottom-up pointer is in an S_None state, clear the sequence
// progress, since the sequence in the bottom-up state finished,
// suggesting a mismatch between retains and releases. This is true for
// all three cases that we are handling here: S_Retain, S_Use, and
// S_CanRelease.
if (SuccSSeq == S_None) {
S.ClearSequenceProgress();
// If we have S_Use or S_CanRelease, perform our check for cfg hazard
const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();
// *NOTE* We do not use Seq from above here since we are allowing for
// S.GetSeq() to change while we are visiting basic blocks.
switch (S.GetSeq()) {
bool ShouldContinue = false;
CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
case S_CanRelease: {
CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
SomeSuccHasSame, AllSuccsHaveSame,
NotAllSeqEqualButKnownSafe);
case S_MovableRelease:
// If the state at the other end of any of the successor edges
// matches the current state, require all edges to match. This
// guards against loops in the middle of a sequence.
if (SomeSuccHasSame && !AllSuccsHaveSame) {
S.ClearSequenceProgress();
} else if (NotAllSeqEqualButKnownSafe) {
// If we would have cleared the state were it not for the fact that we
// are known safe, stop code motion. This is because whether or not it
// is safe to remove RR pairs via KnownSafe is an orthogonal concept to
// whether we are allowed to perform code motion.
S.SetCFGHazardAfflicted(true);
ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
MapVector<Value *, RRInfo> &Retains,
BBState &MyStates) {
bool NestingDetected = false;
InstructionClass Class = GetInstructionClass(Inst);
const Value *Arg = 0;
DEBUG(dbgs() << "Class: " << Class << "\n");
Arg = GetObjCArg(Inst);
PtrState &S = MyStates.getPtrBottomUpState(Arg);
// If we see two releases in a row on the same pointer, make a note;
// we'll circle back to revisit it after we've hopefully eliminated the
// second release, which may allow us to eliminate the first release too.
// Theoretically we could implement removal of nested retain+release
// pairs by making PtrState hold a stack of states, but this is
// simple and avoids adding overhead for the non-nested case.
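// For example (a sketch), walking bottom-up through
//
//   objc_release(%x)   ; visited second; nesting is detected here
//   ...
//   objc_release(%x)   ; visited first; puts %x into a release state
//
// the walk reaches the earlier release while %x is already in a release
// state, which is what sets NestingDetected below.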
1743 if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
1744 DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
1745 NestingDetected = true;
1748 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
1749 Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
1750 ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
1751 S.ResetSequenceProgress(NewSeq);
1752 S.SetReleaseMetadata(ReleaseMetadata);
1753 S.SetKnownSafe(S.HasKnownPositiveRefCount());
1754 S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
1756 S.SetKnownPositiveRefCount();
1759 case IC_RetainBlock:
1760 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1761 // objc_retainBlocks to objc_retains. Thus at this point any
1762 // objc_retainBlocks that we see are not optimizable.
1766 Arg = GetObjCArg(Inst);
1768 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1769 S.SetKnownPositiveRefCount();
1771 Sequence OldSeq = S.GetSeq();
1775 case S_MovableRelease:
1777 // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
1778 // imprecise release, clear our reverse insertion points.
1779 if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
1780 S.ClearReverseInsertPts();
1783 // Don't do retain+release tracking for IC_RetainRV, because it's
1784 // better to let it remain as the first instruction after a call.
1785 if (Class != IC_RetainRV)
1786 Retains[Inst] = S.GetRRInfo();
1787 S.ClearSequenceProgress();
1792 llvm_unreachable("bottom-up pointer in retain state!");
1794 ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
1795 // A retain moving bottom up can be a use.
1798 case IC_AutoreleasepoolPop:
1799 // Conservatively, clear MyStates for all known pointers.
1800 MyStates.clearBottomUpPointers();
1801 return NestingDetected;
1802 case IC_AutoreleasepoolPush:
1804 // These are irrelevant.
1805 return NestingDetected;
1807 // If we have a store into an alloca of a pointer we are tracking, the
1808 // pointer has multiple owners, implying that we must be more conservative.
1810 // This comes up in the context of a pointer being ``KnownSafe''. When a
1811 // block is being initialized, the frontend will emit the
1812 // objc_retain on the original pointer and the release on the pointer loaded
1813 // from the alloca. Through the provenance analysis, the optimizer will
1814 // realize that the two are related, but since we only require KnownSafe in
1815 // one direction, it will match the inner retain on the original pointer with
1816 // the guard release on the original pointer. This is fixed by ensuring that
1817 // in the presence of allocas we only unconditionally remove pointers if
1818 // both our retain and our release are KnownSafe.
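// For example (an illustrative IR sketch; all names are hypothetical):
//   %0 = call i8* @objc_retain(i8* %obj)
//   store i8* %0, i8** %captured            ; %captured is an alloca
//   %1 = load i8** %captured
//   call void @objc_release(i8* %1)
// Provenance analysis relates %obj and %1, so without the check below the
// pair could be removed on the strength of KnownSafe in one direction only.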
1819 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
1820 if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
1821 BBState::ptr_iterator I = MyStates.findPtrBottomUpState(
1822 StripPointerCastsAndObjCCalls(SI->getValueOperand()));
1823 if (I != MyStates.bottom_up_ptr_end())
1824 MultiOwnersSet.insert(I->first);
1832 // Consider any other possible effects of this instruction on each
1833 // pointer being tracked.
1834 for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
1835 ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
1836 const Value *Ptr = MI->first;
1838 continue; // Handled above.
1839 PtrState &S = MI->second;
1840 Sequence Seq = S.GetSeq();
1842 // Check for possible releases.
1843 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
1844 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
1846 S.ClearKnownPositiveRefCount();
1849 S.SetSeq(S_CanRelease);
1850 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
1854 case S_MovableRelease:
1859 llvm_unreachable("bottom-up pointer in retain state!");
1863 // Check for possible direct uses.
1866 case S_MovableRelease:
1867 if (CanUse(Inst, Ptr, PA, Class)) {
1868 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
1870 assert(!S.HasReverseInsertPts());
1871 // If this is an invoke instruction, we're scanning it as part of
1872 // one of its successor blocks, since we can't insert code after it
1873 // in its own block, and we don't want to split critical edges.
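// For example (a sketch): given
//   %r = invoke i8* @f() to label %normal unwind label %lpad
// the reverse insert point recorded while scanning %normal is that
// block's first insertion point rather than a point after the invoke.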
1874 if (isa<InvokeInst>(Inst))
1875 S.InsertReverseInsertPt(BB->getFirstInsertionPt());
1877 S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
1879 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
1880 } else if (Seq == S_Release && IsUser(Class)) {
1881 DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
1883 // Non-movable releases depend on any possible objc pointer use.
1885 ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
1886 assert(!S.HasReverseInsertPts());
1887 // As above; handle invoke specially.
1888 if (isa<InvokeInst>(Inst))
1889 S.InsertReverseInsertPt(BB->getFirstInsertionPt());
1891 S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
1895 if (CanUse(Inst, Ptr, PA, Class)) {
1896 DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
1899 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
1907 llvm_unreachable("bottom-up pointer in retain state!");
1911 return NestingDetected;
1915 ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
1916 DenseMap<const BasicBlock *, BBState> &BBStates,
1917 MapVector<Value *, RRInfo> &Retains) {
1919 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
1921 bool NestingDetected = false;
1922 BBState &MyStates = BBStates[BB];
1924 // Merge the states from each successor to compute the initial state
1925 // for the current block.
1926 BBState::edge_iterator SI(MyStates.succ_begin()),
1927 SE(MyStates.succ_end());
1929 const BasicBlock *Succ = *SI;
1930 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
1931 assert(I != BBStates.end());
1932 MyStates.InitFromSucc(I->second);
1934 for (; SI != SE; ++SI) {
1936 I = BBStates.find(Succ);
1937 assert(I != BBStates.end());
1938 MyStates.MergeSucc(I->second);
1942 // If ARC Annotations are enabled, output the current state of pointers at the
1943 // bottom of the basic block.
1944 ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
1946 // Visit all the instructions, bottom-up.
1947 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
1948 Instruction *Inst = std::prev(I);
1950 // Invoke instructions are visited as part of their successors (below).
1951 if (isa<InvokeInst>(Inst))
1954 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
1956 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
1959 // If there's a predecessor with an invoke, visit the invoke as if it were
1960 // part of this block, since we can't insert code after an invoke in its own
1961 // block, and we don't want to split critical edges.
1962 for (BBState::edge_iterator PI(MyStates.pred_begin()),
1963 PE(MyStates.pred_end()); PI != PE; ++PI) {
1964 BasicBlock *Pred = *PI;
1965 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
1966 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
1969 // If ARC Annotations are enabled, output the current state of pointers at the
1970 // top of the basic block.
1971 ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
1973 return NestingDetected;
1977 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
1978 DenseMap<Value *, RRInfo> &Releases,
1979 BBState &MyStates) {
1980 bool NestingDetected = false;
1981 InstructionClass Class = GetInstructionClass(Inst);
1982 const Value *Arg = 0;
1985 case IC_RetainBlock:
1986 // In OptimizeIndividualCalls, we have strength-reduced all optimizable
1987 // objc_retainBlocks to objc_retains. Thus, at this point, any
1988 // objc_retainBlocks that we see are not optimizable.
1992 Arg = GetObjCArg(Inst);
1994 PtrState &S = MyStates.getPtrTopDownState(Arg);
1996 // Don't do retain+release tracking for IC_RetainRV, because it's
1997 // better to let it remain as the first instruction after a call.
1998 if (Class != IC_RetainRV) {
1999 // Check for two retains in a row on the same pointer. If we see
2000 // them, make a note, and we'll circle back to revisit them after we've
2001 // hopefully eliminated the second retain, which may allow us to
2002 // eliminate the first retain too.
2003 // Theoretically we could implement removal of nested retain+release
2004 // pairs by making PtrState hold a stack of states, but this is
2005 // simple and avoids adding overhead for the non-nested case.
2006 if (S.GetSeq() == S_Retain)
2007 NestingDetected = true;
2009 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
2010 S.ResetSequenceProgress(S_Retain);
2011 S.SetKnownSafe(S.HasKnownPositiveRefCount());
2015 S.SetKnownPositiveRefCount();
2017 // A retain can be a potential use; proceed to the generic checking code below.
2022 Arg = GetObjCArg(Inst);
2024 PtrState &S = MyStates.getPtrTopDownState(Arg);
2025 S.ClearKnownPositiveRefCount();
2027 Sequence OldSeq = S.GetSeq();
2029 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
2034 if (OldSeq == S_Retain || ReleaseMetadata != 0)
2035 S.ClearReverseInsertPts();
2038 S.SetReleaseMetadata(ReleaseMetadata);
2039 S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
2040 Releases[Inst] = S.GetRRInfo();
2041 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
2042 S.ClearSequenceProgress();
2048 case S_MovableRelease:
2049 llvm_unreachable("top-down pointer in release state!");
2053 case IC_AutoreleasepoolPop:
2054 // Conservatively, clear MyStates for all known pointers.
2055 MyStates.clearTopDownPointers();
2056 return NestingDetected;
2057 case IC_AutoreleasepoolPush:
2059 // These are irrelevant.
2060 return NestingDetected;
2065 // Consider any other possible effects of this instruction on each
2066 // pointer being tracked.
2067 for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
2068 ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
2069 const Value *Ptr = MI->first;
2071 continue; // Handled above.
2072 PtrState &S = MI->second;
2073 Sequence Seq = S.GetSeq();
2075 // Check for possible releases.
2076 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2077 DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
2079 S.ClearKnownPositiveRefCount();
2082 S.SetSeq(S_CanRelease);
2083 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
2084 assert(!S.HasReverseInsertPts());
2085 S.InsertReverseInsertPt(Inst);
2087 // One call can't cause a transition from S_Retain to S_CanRelease
2088 // and S_CanRelease to S_Use. If we've made the first transition, we're done.
2097 case S_MovableRelease:
2098 llvm_unreachable("top-down pointer in release state!");
2102 // Check for possible direct uses.
2105 if (CanUse(Inst, Ptr, PA, Class)) {
2106 DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
2109 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
2118 case S_MovableRelease:
2119 llvm_unreachable("top-down pointer in release state!");
2123 return NestingDetected;
2127 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
2128 DenseMap<const BasicBlock *, BBState> &BBStates,
2129 DenseMap<Value *, RRInfo> &Releases) {
2130 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
2131 bool NestingDetected = false;
2132 BBState &MyStates = BBStates[BB];
2134 // Merge the states from each predecessor to compute the initial state
2135 // for the current block.
2136 BBState::edge_iterator PI(MyStates.pred_begin()),
2137 PE(MyStates.pred_end());
2139 const BasicBlock *Pred = *PI;
2140 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
2141 assert(I != BBStates.end());
2142 MyStates.InitFromPred(I->second);
2144 for (; PI != PE; ++PI) {
2146 I = BBStates.find(Pred);
2147 assert(I != BBStates.end());
2148 MyStates.MergePred(I->second);
2152 // If ARC Annotations are enabled, output the current state of pointers at the
2153 // top of the basic block.
2154 ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
2156 // Visit all the instructions, top-down.
2157 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
2158 Instruction *Inst = I;
2160 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2162 NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
2165 // If ARC Annotations are enabled, output the current state of pointers at the
2166 // bottom of the basic block.
2167 ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
2169 #ifdef ARC_ANNOTATIONS
2170 if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
2172 CheckForCFGHazards(BB, BBStates, MyStates);
2173 return NestingDetected;
2177 ComputePostOrders(Function &F,
2178 SmallVectorImpl<BasicBlock *> &PostOrder,
2179 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
2180 unsigned NoObjCARCExceptionsMDKind,
2181 DenseMap<const BasicBlock *, BBState> &BBStates) {
2182 /// The visited set, for doing DFS walks.
2183 SmallPtrSet<BasicBlock *, 16> Visited;
2185 // Do DFS, computing the PostOrder.
2186 SmallPtrSet<BasicBlock *, 16> OnStack;
2187 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
2189 // Functions always have exactly one entry block, and we don't have
2190 // any other block that we treat like an entry block.
2191 BasicBlock *EntryBB = &F.getEntryBlock();
2192 BBState &MyStates = BBStates[EntryBB];
2193 MyStates.SetAsEntry();
2194 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
2195 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
2196 Visited.insert(EntryBB);
2197 OnStack.insert(EntryBB);
2200 BasicBlock *CurrBB = SuccStack.back().first;
2201 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
2202 succ_iterator SE(TI, false);
2204 while (SuccStack.back().second != SE) {
2205 BasicBlock *SuccBB = *SuccStack.back().second++;
2206 if (Visited.insert(SuccBB)) {
2207 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
2208 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
2209 BBStates[CurrBB].addSucc(SuccBB);
2210 BBState &SuccStates = BBStates[SuccBB];
2211 SuccStates.addPred(CurrBB);
2212 OnStack.insert(SuccBB);
2216 if (!OnStack.count(SuccBB)) {
2217 BBStates[CurrBB].addSucc(SuccBB);
2218 BBStates[SuccBB].addPred(CurrBB);
2221 OnStack.erase(CurrBB);
2222 PostOrder.push_back(CurrBB);
2223 SuccStack.pop_back();
2224 } while (!SuccStack.empty());
2228 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
2229 // Functions may have many exits, and there are also blocks which we treat
2230 // as exits due to ignored edges.
2231 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
2232 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2233 BasicBlock *ExitBB = I;
2234 BBState &MyStates = BBStates[ExitBB];
2235 if (!MyStates.isExit())
2238 MyStates.SetAsExit();
2240 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
2241 Visited.insert(ExitBB);
2242 while (!PredStack.empty()) {
2243 reverse_dfs_next_succ:
2244 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
2245 while (PredStack.back().second != PE) {
2246 BasicBlock *BB = *PredStack.back().second++;
2247 if (Visited.insert(BB)) {
2248 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
2249 goto reverse_dfs_next_succ;
2252 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
2257 // Visit the function both top-down and bottom-up.
2259 ObjCARCOpt::Visit(Function &F,
2260 DenseMap<const BasicBlock *, BBState> &BBStates,
2261 MapVector<Value *, RRInfo> &Retains,
2262 DenseMap<Value *, RRInfo> &Releases) {
2264 // Use reverse-postorder traversals, because we magically know that loops
2265 // will be well behaved, i.e. they won't repeatedly call retain on a single
2266 // pointer without doing a release. We can't use the ReversePostOrderTraversal
2267 // class here because we want the reverse-CFG postorder to consider each
2268 // function exit point, and we want to ignore selected cycle edges.
2269 SmallVector<BasicBlock *, 16> PostOrder;
2270 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
2271 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
2272 NoObjCARCExceptionsMDKind,
2275 // Use reverse-postorder on the reverse CFG for bottom-up.
2276 bool BottomUpNestingDetected = false;
2277 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2278 ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
2280 BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
2282 // Use reverse-postorder for top-down.
2283 bool TopDownNestingDetected = false;
2284 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2285 PostOrder.rbegin(), E = PostOrder.rend();
2287 TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
2289 return TopDownNestingDetected && BottomUpNestingDetected;
2292 /// Move the calls in RetainsToMove and ReleasesToMove.
2293 void ObjCARCOpt::MoveCalls(Value *Arg,
2294 RRInfo &RetainsToMove,
2295 RRInfo &ReleasesToMove,
2296 MapVector<Value *, RRInfo> &Retains,
2297 DenseMap<Value *, RRInfo> &Releases,
2298 SmallVectorImpl<Instruction *> &DeadInsts,
2300 Type *ArgTy = Arg->getType();
2301 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
2303 DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
2305 // Insert the new retain and release calls.
2306 for (SmallPtrSet<Instruction *, 2>::const_iterator
2307 PI = ReleasesToMove.ReverseInsertPts.begin(),
2308 PE = ReleasesToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2309 Instruction *InsertPt = *PI;
2310 Value *MyArg = ArgTy == ParamTy ? Arg :
2311 new BitCastInst(Arg, ParamTy, "", InsertPt);
2312 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
2313 CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
2314 Call->setDoesNotThrow();
2315 Call->setTailCall();
2317 DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
2318 "At insertion point: " << *InsertPt << "\n");
2320 for (SmallPtrSet<Instruction *, 2>::const_iterator
2321 PI = RetainsToMove.ReverseInsertPts.begin(),
2322 PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2323 Instruction *InsertPt = *PI;
2324 Value *MyArg = ArgTy == ParamTy ? Arg :
2325 new BitCastInst(Arg, ParamTy, "", InsertPt);
2326 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
2327 CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
2328 // Attach a clang.imprecise_release metadata tag, if appropriate.
2329 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
2330 Call->setMetadata(ImpreciseReleaseMDKind, M);
2331 Call->setDoesNotThrow();
2332 if (ReleasesToMove.IsTailCallRelease)
2333 Call->setTailCall();
2335 DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
2336 "At insertion point: " << *InsertPt << "\n");
2339 // Delete the original retain and release calls.
2340 for (SmallPtrSet<Instruction *, 2>::const_iterator
2341 AI = RetainsToMove.Calls.begin(),
2342 AE = RetainsToMove.Calls.end(); AI != AE; ++AI) {
2343 Instruction *OrigRetain = *AI;
2344 Retains.blot(OrigRetain);
2345 DeadInsts.push_back(OrigRetain);
2346 DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
2348 for (SmallPtrSet<Instruction *, 2>::const_iterator
2349 AI = ReleasesToMove.Calls.begin(),
2350 AE = ReleasesToMove.Calls.end(); AI != AE; ++AI) {
2351 Instruction *OrigRelease = *AI;
2352 Releases.erase(OrigRelease);
2353 DeadInsts.push_back(OrigRelease);
2354 DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
2360 ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
2362 MapVector<Value *, RRInfo> &Retains,
2363 DenseMap<Value *, RRInfo> &Releases,
2365 SmallVectorImpl<Instruction *> &NewRetains,
2366 SmallVectorImpl<Instruction *> &NewReleases,
2367 SmallVectorImpl<Instruction *> &DeadInsts,
2368 RRInfo &RetainsToMove,
2369 RRInfo &ReleasesToMove,
2372 bool &AnyPairsCompletelyEliminated) {
2373 // If a pair happens in a region where it is known that the reference count
2374 // is already incremented, we can similarly ignore possible decrements unless
2375 // we are dealing with a retainable object with multiple provenance sources.
2376 bool KnownSafeTD = true, KnownSafeBU = true;
2377 bool MultipleOwners = false;
2378 bool CFGHazardAfflicted = false;
2380 // Connect the dots between the top-down-collected RetainsToMove and
2381 // bottom-up-collected ReleasesToMove to form sets of related calls.
2382 // This is an iterative process so that we connect multiple releases
2383 // to multiple retains if needed.
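// For example (a sketch): if retain R1 pairs with releases S1 and S2, and
// S2 also pairs with a second retain R2, then starting from R1 the forward
// pass collects {S1, S2} and the backward pass collects {R1, R2}; the
// iteration continues until the sets stop growing.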
2384 unsigned OldDelta = 0;
2385 unsigned NewDelta = 0;
2386 unsigned OldCount = 0;
2387 unsigned NewCount = 0;
2388 bool FirstRelease = true;
2390 for (SmallVectorImpl<Instruction *>::const_iterator
2391 NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
2392 Instruction *NewRetain = *NI;
2393 MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
2394 assert(It != Retains.end());
2395 const RRInfo &NewRetainRRI = It->second;
2396 KnownSafeTD &= NewRetainRRI.KnownSafe;
2398 MultipleOwners || MultiOwnersSet.count(GetObjCArg(NewRetain));
2399 for (SmallPtrSet<Instruction *, 2>::const_iterator
2400 LI = NewRetainRRI.Calls.begin(),
2401 LE = NewRetainRRI.Calls.end(); LI != LE; ++LI) {
2402 Instruction *NewRetainRelease = *LI;
2403 DenseMap<Value *, RRInfo>::const_iterator Jt =
2404 Releases.find(NewRetainRelease);
2405 if (Jt == Releases.end())
2407 const RRInfo &NewRetainReleaseRRI = Jt->second;
2409 // If the release does not have a reference to the retain as well,
2410 // something happened which is unaccounted for. Do not do anything.
2412 // This can happen if we catch an additive overflow during path count merging.
2414 if (!NewRetainReleaseRRI.Calls.count(NewRetain))
2417 if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
2419 // If we overflow when we compute the path count, don't remove/move
2421 const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
2422 unsigned PathCount = BBState::OverflowOccurredValue;
2423 if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
2425 assert(PathCount != BBState::OverflowOccurredValue &&
2426 "PathCount at this point can not be "
2427 "OverflowOccurredValue.");
2428 OldDelta -= PathCount;
2430 // Merge the ReleaseMetadata and IsTailCallRelease values.
2432 ReleasesToMove.ReleaseMetadata =
2433 NewRetainReleaseRRI.ReleaseMetadata;
2434 ReleasesToMove.IsTailCallRelease =
2435 NewRetainReleaseRRI.IsTailCallRelease;
2436 FirstRelease = false;
2438 if (ReleasesToMove.ReleaseMetadata !=
2439 NewRetainReleaseRRI.ReleaseMetadata)
2440 ReleasesToMove.ReleaseMetadata = 0;
2441 if (ReleasesToMove.IsTailCallRelease !=
2442 NewRetainReleaseRRI.IsTailCallRelease)
2443 ReleasesToMove.IsTailCallRelease = false;
2446 // Collect the optimal insertion points.
2448 for (SmallPtrSet<Instruction *, 2>::const_iterator
2449 RI = NewRetainReleaseRRI.ReverseInsertPts.begin(),
2450 RE = NewRetainReleaseRRI.ReverseInsertPts.end();
2452 Instruction *RIP = *RI;
2453 if (ReleasesToMove.ReverseInsertPts.insert(RIP)) {
2454 // If we overflow when we compute the path count, don't
2455 // remove/move anything.
2456 const BBState &RIPBBState = BBStates[RIP->getParent()];
2457 PathCount = BBState::OverflowOccurredValue;
2458 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
2460 assert(PathCount != BBState::OverflowOccurredValue &&
2461 "PathCount at this point can not be "
2462 "OverflowOccurredValue.");
2463 NewDelta -= PathCount;
2466 NewReleases.push_back(NewRetainRelease);
2471 if (NewReleases.empty()) break;
2473 // Back the other way.
2474 for (SmallVectorImpl<Instruction *>::const_iterator
2475 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
2476 Instruction *NewRelease = *NI;
2477 DenseMap<Value *, RRInfo>::const_iterator It =
2478 Releases.find(NewRelease);
2479 assert(It != Releases.end());
2480 const RRInfo &NewReleaseRRI = It->second;
2481 KnownSafeBU &= NewReleaseRRI.KnownSafe;
2482 CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
2483 for (SmallPtrSet<Instruction *, 2>::const_iterator
2484 LI = NewReleaseRRI.Calls.begin(),
2485 LE = NewReleaseRRI.Calls.end(); LI != LE; ++LI) {
2486 Instruction *NewReleaseRetain = *LI;
2487 MapVector<Value *, RRInfo>::const_iterator Jt =
2488 Retains.find(NewReleaseRetain);
2489 if (Jt == Retains.end())
2491 const RRInfo &NewReleaseRetainRRI = Jt->second;
2493 // If the retain does not have a reference to the release as well,
2494 // something happened which is unaccounted for. Do not do anything.
2496 // This can happen if we catch an additive overflow during path count merging.
2498 if (!NewReleaseRetainRRI.Calls.count(NewRelease))
2501 if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
2502 // If we overflow when we compute the path count, don't remove/move
2504 const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
2505 unsigned PathCount = BBState::OverflowOccurredValue;
2506 if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
2508 assert(PathCount != BBState::OverflowOccurredValue &&
2509 "PathCount at this point can not be "
2510 "OverflowOccurredValue.");
2511 OldDelta += PathCount;
2512 OldCount += PathCount;
2514 // Collect the optimal insertion points.
2516 for (SmallPtrSet<Instruction *, 2>::const_iterator
2517 RI = NewReleaseRetainRRI.ReverseInsertPts.begin(),
2518 RE = NewReleaseRetainRRI.ReverseInsertPts.end();
2520 Instruction *RIP = *RI;
2521 if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
2522 // If we overflow when we compute the path count, don't
2523 // remove/move anything.
2524 const BBState &RIPBBState = BBStates[RIP->getParent()];
2526 PathCount = BBState::OverflowOccurredValue;
2527 if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
2529 assert(PathCount != BBState::OverflowOccurredValue &&
2530 "PathCount at this point can not be "
2531 "OverflowOccurredValue.");
2532 NewDelta += PathCount;
2533 NewCount += PathCount;
2536 NewRetains.push_back(NewReleaseRetain);
2540 NewReleases.clear();
2541 if (NewRetains.empty()) break;
2544 // If the pointer is known to be incremented in one direction and we do not
2545 // have MultipleOwners, we can safely remove the retain/releases. Otherwise
2546 // the pointer needs to be known safe in both directions.
2547 bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
2548 ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
2549 if (UnconditionallySafe) {
2550 RetainsToMove.ReverseInsertPts.clear();
2551 ReleasesToMove.ReverseInsertPts.clear();
2554 // Determine whether the new insertion points we computed preserve the
2555 // balance of retain and release calls through the program.
2556 // TODO: If the fully aggressive solution isn't valid, try to find a
2557 // less aggressive solution which is.
2561 // At this point, we are not going to remove any RR pairs, but we are still
2562 // able to move RR pairs. If one of our pointers is afflicted with
2563 // CFGHazards, we cannot perform such code motion, so exit early.
2564 const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
2565 ReleasesToMove.ReverseInsertPts.size();
2566 if (CFGHazardAfflicted && WillPerformCodeMotion)
2570 // Determine whether the original call points are balanced in the retain and
2571 // release calls through the program. If not, conservatively don't touch anything.
2573 // TODO: It's theoretically possible to do code motion in this case, as
2574 // long as the existing imbalances are maintained.
2578 #ifdef ARC_ANNOTATIONS
2579 // Do not move calls if ARC annotations are requested.
2580 if (EnableARCAnnotations)
2582 #endif // ARC_ANNOTATIONS
2585 assert(OldCount != 0 && "Unreachable code?");
2586 NumRRs += OldCount - NewCount;
2587 // Set to true if we completely removed any RR pairs.
2588 AnyPairsCompletelyEliminated = NewCount == 0;
2590 // We can move calls!
2594 /// Identify pairings between the retains and releases, and delete and/or move them.
2597 ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
2599 MapVector<Value *, RRInfo> &Retains,
2600 DenseMap<Value *, RRInfo> &Releases,
2602 DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2604 bool AnyPairsCompletelyEliminated = false;
2605 RRInfo RetainsToMove;
2606 RRInfo ReleasesToMove;
2607 SmallVector<Instruction *, 4> NewRetains;
2608 SmallVector<Instruction *, 4> NewReleases;
2609 SmallVector<Instruction *, 8> DeadInsts;
2611 // Visit each retain.
2612 for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2613 E = Retains.end(); I != E; ++I) {
2614 Value *V = I->first;
2615 if (!V) continue; // blotted
2617 Instruction *Retain = cast<Instruction>(V);
2619 DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2621 Value *Arg = GetObjCArg(Retain);
2623 // If the object being released is in static or stack storage, we know it's
2624 // not being managed by ObjC reference counting, so we can delete pairs
2625 // regardless of what possible decrements or uses lie between them.
2626 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2628 // A constant pointer can't be pointing to an object on the heap. It may
2629 // be reference-counted, but it won't be deleted.
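// For example (an illustrative sketch):
//   @g = constant i8* ...
//   %p = load i8** @g
// A pointer loaded from a constant global like %p can be treated as
// known safe here.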
2630 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2631 if (const GlobalVariable *GV =
2632 dyn_cast<GlobalVariable>(
2633 StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
2634 if (GV->isConstant())
2637 // Connect the dots between the top-down-collected RetainsToMove and
2638 // bottom-up-collected ReleasesToMove to form sets of related calls.
2639 NewRetains.push_back(Retain);
2640 bool PerformMoveCalls =
2641 ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
2642 NewReleases, DeadInsts, RetainsToMove,
2643 ReleasesToMove, Arg, KnownSafe,
2644 AnyPairsCompletelyEliminated);
2646 if (PerformMoveCalls) {
2647 // Ok, everything checks out and we're all set. Let's move/delete some code!
2649 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2650 Retains, Releases, DeadInsts, M);
2653 // Clean up state for next retain.
2654 NewReleases.clear();
2656 RetainsToMove.clear();
2657 ReleasesToMove.clear();
2660 // Now that we're done moving everything, we can delete the newly dead
2661 // instructions, as we no longer need them as insert points.
2662 while (!DeadInsts.empty())
2663 EraseInstruction(DeadInsts.pop_back_val());
2665 return AnyPairsCompletelyEliminated;
2668 /// Weak pointer optimizations.
2669 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2670 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2672 // First, do memdep-style RLE (redundant load elimination) and S2L
2673 // (store-to-load forwarding) optimizations. We can't use memdep itself
2674 // because it uses AliasAnalysis and we need to do provenance queries instead.
2675 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2676 Instruction *Inst = &*I++;
2678 DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2680 InstructionClass Class = GetBasicInstructionClass(Inst);
2681 if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
2684 // Delete objc_loadWeak calls with no users.
2685 if (Class == IC_LoadWeak && Inst->use_empty()) {
2686 Inst->eraseFromParent();
2690 // TODO: For now, just look for an earlier available version of this value
2691 // within the same block. Theoretically, we could do memdep-style non-local
2692 // analysis too, but that would want caching. A better approach would be to
2693 // use the technique that EarlyCSE uses.
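// For example (a sketch of the redundant-load case; %w is hypothetical):
//   %1 = call i8* @objc_loadWeak(i8** %w)
//   %2 = call i8* @objc_loadWeak(i8** %w)
// With nothing in between that can modify the weak pointer, %2 can be
// replaced by %1, subject to the alias queries below.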
2694 inst_iterator Current = std::prev(I);
2695 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
2696 for (BasicBlock::iterator B = CurrentBB->begin(),
2697 J = Current.getInstructionIterator();
2699 Instruction *EarlierInst = &*std::prev(J);
2700 InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
2701 switch (EarlierClass) {
2703 case IC_LoadWeakRetained: {
2704 // If this is loading from the same pointer, replace this load's value with that one.
2706 CallInst *Call = cast<CallInst>(Inst);
2707 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2708 Value *Arg = Call->getArgOperand(0);
2709 Value *EarlierArg = EarlierCall->getArgOperand(0);
2710 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2711 case AliasAnalysis::MustAlias:
2713 // If the load has a builtin retain, insert a plain retain for it.
2714 if (Class == IC_LoadWeakRetained) {
2715 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
2716 CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2719 // Zap the fully redundant load.
2720 Call->replaceAllUsesWith(EarlierCall);
2721 Call->eraseFromParent();
2723 case AliasAnalysis::MayAlias:
2724 case AliasAnalysis::PartialAlias:
2726 case AliasAnalysis::NoAlias:
2733 // If this is storing to the same pointer and has the same size etc.,
2734 // replace this load's value with the stored value.
2735 CallInst *Call = cast<CallInst>(Inst);
2736 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2737 Value *Arg = Call->getArgOperand(0);
2738 Value *EarlierArg = EarlierCall->getArgOperand(0);
2739 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2740 case AliasAnalysis::MustAlias:
2742 // If the load has a builtin retain, insert a plain retain for it.
2743 if (Class == IC_LoadWeakRetained) {
2744 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
2745 CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
2748 // Zap the fully redundant load.
2749 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2750 Call->eraseFromParent();
2752 case AliasAnalysis::MayAlias:
2753 case AliasAnalysis::PartialAlias:
2755 case AliasAnalysis::NoAlias:
2762 // TODO: Grab the copied value.
2764 case IC_AutoreleasepoolPush:
2766 case IC_IntrinsicUser:
2768 // Weak pointers are only modified through the weak entry points
2769 // (and arbitrary calls, which could call the weak entry points).
2772 // Anything else could modify the weak pointer.
2779 // Then, for each destroyWeak with an alloca operand, check to see if
2780 // the alloca and all its users can be zapped.
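// For example (a sketch), a weak variable that is only initialized and
// destroyed:
//   %w = alloca i8*
//   call i8* @objc_initWeak(i8** %w, i8* %p)
//   call void @objc_destroyWeak(i8** %w)
// can be deleted outright, with the initWeak's uses replaced by %p.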
2781 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2782 Instruction *Inst = &*I++;
2783 InstructionClass Class = GetBasicInstructionClass(Inst);
2784 if (Class != IC_DestroyWeak)
2787 CallInst *Call = cast<CallInst>(Inst);
2788 Value *Arg = Call->getArgOperand(0);
2789 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2790 for (Value::use_iterator UI = Alloca->use_begin(),
2791 UE = Alloca->use_end(); UI != UE; ++UI) {
2792 const Instruction *UserInst = cast<Instruction>(*UI);
2793 switch (GetBasicInstructionClass(UserInst)) {
2796 case IC_DestroyWeak:
2803 for (Value::use_iterator UI = Alloca->use_begin(),
2804 UE = Alloca->use_end(); UI != UE; ) {
2805 CallInst *UserInst = cast<CallInst>(*UI++);
2806 switch (GetBasicInstructionClass(UserInst)) {
2809 // These functions return their second argument.
2810 UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2812 case IC_DestroyWeak:
2816 llvm_unreachable("alloca really is used!");
2818 UserInst->eraseFromParent();
2820 Alloca->eraseFromParent();
2826 /// Identify program paths that execute sequences of retains and releases
2827 /// which can be eliminated.
2828 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2829 // Releases, Retains - These are used to store the results of the main flow
2830 // analysis. These use Value* as the key instead of Instruction* so that the
2831 // map stays valid when we get around to rewriting code and calls get
2832 // replaced by arguments.
2833 DenseMap<Value *, RRInfo> Releases;
2834 MapVector<Value *, RRInfo> Retains;
2836 // This is used during the traversal of the function to track the
2837 // states for each identified object at each block.
2838 DenseMap<const BasicBlock *, BBState> BBStates;
2840 // Analyze the CFG of the function, and all instructions.
2841 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2844 bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
2849 MultiOwnersSet.clear();
2851 return AnyPairsCompletelyEliminated && NestingDetected;
2854 /// Check whether there is an earlier dependent call such that nothing in
2855 /// between it and the Retain can affect the reference count of their
2856 /// shared pointer argument. Note that Retain need not be in BB.
2858 HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
2859 SmallPtrSet<Instruction *, 4> &DepInsts,
2860 SmallPtrSet<const BasicBlock *, 4> &Visited,
2861 ProvenanceAnalysis &PA) {
2862 FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
2863 DepInsts, Visited, PA);
2864 if (DepInsts.size() != 1)
2868 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2870 // Check that the pointer is the return value of the call.
2871 if (!Call || Arg != Call)
2874 // Check that the call is a regular call.
2875 InstructionClass Class = GetBasicInstructionClass(Call);
2876 if (Class != IC_CallOrUser && Class != IC_Call)
2882 /// Find a dependent retain that precedes the given autorelease for which there
2883 /// is nothing in between the two instructions that can affect the ref count of
2886 FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2887 Instruction *Autorelease,
2888 SmallPtrSet<Instruction *, 4> &DepInsts,
2889 SmallPtrSet<const BasicBlock *, 4> &Visited,
2890 ProvenanceAnalysis &PA) {
2891 FindDependencies(CanChangeRetainCount, Arg,
2892 BB, Autorelease, DepInsts, Visited, PA);
2893 if (DepInsts.size() != 1)
2897 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2899 // Check that we found a retain with the same argument.
2901 !IsRetain(GetBasicInstructionClass(Retain)) ||
2902 GetObjCArg(Retain) != Arg) {
2909 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
2910 /// no instructions dependent on Arg that need a positive ref count in between
2911 /// the autorelease and the ret.
2913 FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
2915 SmallPtrSet<Instruction *, 4> &DepInsts,
2916 SmallPtrSet<const BasicBlock *, 4> &V,
2917 ProvenanceAnalysis &PA) {
2918 FindDependencies(NeedsPositiveRetainCount, Arg,
2919 BB, Ret, DepInsts, V, PA);
2920 if (DepInsts.size() != 1)
2923 CallInst *Autorelease =
2924 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2927 InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
2928 if (!IsAutorelease(AutoreleaseClass))
2930 if (GetObjCArg(Autorelease) != Arg)
2936 /// Look for this pattern:
2938 /// %call = call i8* @something(...)
2939 /// %2 = call i8* @objc_retain(i8* %call)
2940 /// %3 = call i8* @objc_autorelease(i8* %2)
2943 /// And delete the retain and autorelease.
2944 void ObjCARCOpt::OptimizeReturns(Function &F) {
2945 if (!F.getReturnType()->isPointerTy())
2948 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
2950 SmallPtrSet<Instruction *, 4> DependingInstructions;
2951 SmallPtrSet<const BasicBlock *, 4> Visited;
2952 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
2953 BasicBlock *BB = FI;
2954 ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
2956 DEBUG(dbgs() << "Visiting: " << *Ret << "\n");
2961 const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
2963 // Look for an ``autorelease'' instruction that is a predecessor of Ret and
2964 // dependent on Arg such that there are no instructions dependent on Arg
2965 // that need a positive ref count in between the autorelease and Ret.
2966 CallInst *Autorelease =
2967 FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
2968 DependingInstructions, Visited,
2970 DependingInstructions.clear();
2977 FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
2978 DependingInstructions, Visited, PA);
2979 DependingInstructions.clear();
2985 // Check that there is nothing that can affect the reference count
2986 // between the retain and the call. Note that Retain need not be in BB.
2987 bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
2988 DependingInstructions,
2990 DependingInstructions.clear();
2993 if (!HasSafePathToCall)
2996 // If so, we can zap the retain and autorelease.
2999 DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
3000 << *Autorelease << "\n");
3001 EraseInstruction(Retain);
3002 EraseInstruction(Autorelease);
3008 ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
3009 llvm::Statistic &NumRetains =
3010 AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
3011 llvm::Statistic &NumReleases =
3012 AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;
3014 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
3015 Instruction *Inst = &*I++;
3016 switch (GetBasicInstructionClass(Inst)) {
3030 bool ObjCARCOpt::doInitialization(Module &M) {
3034 // If nothing in the Module uses ARC, don't do anything.
3035 Run = ModuleHasARC(M);
3039 // Identify the imprecise release metadata kind.
3040 ImpreciseReleaseMDKind =
3041 M.getContext().getMDKindID("clang.imprecise_release");
3042 CopyOnEscapeMDKind =
3043 M.getContext().getMDKindID("clang.arc.copy_on_escape");
3044 NoObjCARCExceptionsMDKind =
3045 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
3046 #ifdef ARC_ANNOTATIONS
3047 ARCAnnotationBottomUpMDKind =
3048 M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
3049 ARCAnnotationTopDownMDKind =
3050 M.getContext().getMDKindID("llvm.arc.annotation.topdown");
3051 ARCAnnotationProvenanceSourceMDKind =
3052 M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
3053 #endif // ARC_ANNOTATIONS
3055 // Intuitively, objc_retain and others are nocapture; however, in practice
3056 // they are not, because they return their argument value. And objc_release
3057 // calls finalizers which can have arbitrary side effects.
3059 // Initialize our runtime entry point cache.
3065 bool ObjCARCOpt::runOnFunction(Function &F) {
3069 // If nothing in the Module uses ARC, don't do anything.
3075 DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
3078 PA.setAA(&getAnalysis<AliasAnalysis>());
3081 if (AreStatisticsEnabled()) {
3082 GatherStatistics(F, false);
3086 // This pass performs several distinct transformations. As a compile-time aid
3087 // when compiling code that isn't ObjC, skip these if the relevant ObjC
3088 // library functions aren't declared.
3090 // Preliminary optimizations. This also computes UsedInThisFunction.
3091 OptimizeIndividualCalls(F);
3093 // Optimizations for weak pointers.
3094 if (UsedInThisFunction & ((1 << IC_LoadWeak) |
3095 (1 << IC_LoadWeakRetained) |
3096 (1 << IC_StoreWeak) |
3097 (1 << IC_InitWeak) |
3098 (1 << IC_CopyWeak) |
3099 (1 << IC_MoveWeak) |
3100 (1 << IC_DestroyWeak)))
3101 OptimizeWeakCalls(F);
3103 // Optimizations for retain+release pairs.
3104 if (UsedInThisFunction & ((1 << IC_Retain) |
3105 (1 << IC_RetainRV) |
3106 (1 << IC_RetainBlock)))
3107 if (UsedInThisFunction & (1 << IC_Release))
3108 // Run OptimizeSequences until it either stops making changes or
3109 // no retain+release pair nesting is detected.
3110 while (OptimizeSequences(F)) {}
3112 // Optimizations if objc_autorelease is used.
3113 if (UsedInThisFunction & ((1 << IC_Autorelease) |
3114 (1 << IC_AutoreleaseRV)))
3117 // Gather statistics after optimization.
3119 if (AreStatisticsEnabled()) {
3120 GatherStatistics(F, true);
3124 DEBUG(dbgs() << "\n");
3129 void ObjCARCOpt::releaseMemory() {