//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#include "ObjCARC.h"
#include "ARCRuntimeEntryPoints.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;

#define DEBUG_TYPE "objc-arc-opts"
/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.

namespace {

/// \brief An associative container with fast insertion-order (deterministic)
/// iteration over its elements. Plus the special blot operation.
template<class KeyT, class ValueT>
class MapVector {
  /// Map keys to indices in Vector.
  typedef DenseMap<KeyT, size_t> MapTy;
  MapTy Map;

  /// Keys and values.
  typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
  VectorTy Vector;

public:
  typedef typename VectorTy::iterator iterator;
  typedef typename VectorTy::const_iterator const_iterator;
  iterator begin() { return Vector.begin(); }
  iterator end() { return Vector.end(); }
  const_iterator begin() const { return Vector.begin(); }
  const_iterator end() const { return Vector.end(); }
#ifdef XDEBUG
  ~MapVector() {
    assert(Vector.size() >= Map.size()); // May differ due to blotting.
    for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
         I != E; ++I) {
      assert(I->second < Vector.size());
      assert(Vector[I->second].first == I->first);
    }
    for (typename VectorTy::const_iterator I = Vector.begin(),
         E = Vector.end(); I != E; ++I)
      assert(!I->first ||
             (Map.count(I->first) &&
              Map[I->first] == size_t(I - Vector.begin())));
  }
#endif
  ValueT &operator[](const KeyT &Arg) {
    std::pair<typename MapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(Arg, size_t(0)));
    if (Pair.second) {
      size_t Num = Vector.size();
      Pair.first->second = Num;
      Vector.push_back(std::make_pair(Arg, ValueT()));
      return Vector[Num].second;
    }
    return Vector[Pair.first->second].second;
  }
  std::pair<iterator, bool>
  insert(const std::pair<KeyT, ValueT> &InsertPair) {
    std::pair<typename MapTy::iterator, bool> Pair =
      Map.insert(std::make_pair(InsertPair.first, size_t(0)));
    if (Pair.second) {
      size_t Num = Vector.size();
      Pair.first->second = Num;
      Vector.push_back(InsertPair);
      return std::make_pair(Vector.begin() + Num, true);
    }
    return std::make_pair(Vector.begin() + Pair.first->second, false);
  }
  iterator find(const KeyT &Key) {
    typename MapTy::iterator It = Map.find(Key);
    if (It == Map.end()) return Vector.end();
    return Vector.begin() + It->second;
  }

  const_iterator find(const KeyT &Key) const {
    typename MapTy::const_iterator It = Map.find(Key);
    if (It == Map.end()) return Vector.end();
    return Vector.begin() + It->second;
  }
  /// This is similar to erase, but instead of removing the element from the
  /// vector, it just zeros out the key in the vector. This leaves iterators
  /// intact, but clients must be prepared for zeroed-out keys when iterating.
  void blot(const KeyT &Key) {
    typename MapTy::iterator It = Map.find(Key);
    if (It == Map.end()) return;
    Vector[It->second].first = KeyT();
    Map.erase(It);
  }

  void clear() {
    Vector.clear();
    Map.clear();
  }
};

} // end anonymous namespace
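// For illustration only (not part of the original file): a minimal sketch of
// how a client might use blot while keeping iterators valid. The names MV,
// PtrA, PtrB, and Use are hypothetical. Blotted entries stay in the vector
// with a null key, so iteration must skip them:
//
//   MapVector<const Value *, unsigned> MV;
//   MV[PtrA] = 1;
//   MV[PtrB] = 2;
//   MV.blot(PtrA); // PtrA's slot remains, with a zeroed-out key.
//   for (MapVector<const Value *, unsigned>::iterator I = MV.begin(),
//        E = MV.end(); I != E; ++I)
//     if (I->first)   // Skip blotted entries.
//       Use(I->second);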
/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.

/// \brief This is similar to GetRCIdentityRoot but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicARCInstKind(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return nullptr;
    return Arg;
  }

  // If we found an identifiable object, and it has multiple uses, but they
  // are all trivial uses, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (const User *U : Arg->users())
      if (!U->use_empty() || GetRCIdentityRoot(U) != Arg)
        return nullptr;

    return Arg;
  }

  return nullptr;
}
/// This is a wrapper around GetUnderlyingObjCPtr along the lines of
/// GetUnderlyingObjects except that it returns early when it sees the first
/// alloca.
static inline bool AreAnyUnderlyingObjectsAnAlloca(const Value *V) {
  SmallPtrSet<const Value *, 4> Visited;
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(V);
  do {
    const Value *P = Worklist.pop_back_val();
    P = GetUnderlyingObjCPtr(P);

    if (isa<AllocaInst>(P))
      return true;

    if (!Visited.insert(P).second)
      continue;

    if (const SelectInst *SI = dyn_cast<const SelectInst>(P)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    if (const PHINode *PN = dyn_cast<const PHINode>(P)) {
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        Worklist.push_back(PN->getIncomingValue(i));
      continue;
    }
  } while (!Worklist.empty());

  return false;
}
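// A hypothetical IR shape (for illustration, not from the source) that this
// walk handles: the pointer may reach an alloca only through a select or phi,
// e.g.
//
//   %buf = alloca i8*
//   %p   = select i1 %c, i8** %buf, i8** %other
//
// Here the select's true operand is an alloca, so the function returns true.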
/// \defgroup ARCOpt ARC Optimization.
// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.
// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.
// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).
STATISTIC(NumNoops, "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases, "Number of autoreleases converted to releases");
STATISTIC(NumRets, "Number of return value forwarding "
                   "retain+autoreleases eliminated");
STATISTIC(NumRRs, "Number of retain+release paths eliminated");
STATISTIC(NumPeeps, "Number of calls peephole-optimized");
#ifndef NDEBUG
STATISTIC(NumRetainsBeforeOpt,
          "Number of retains before optimization");
STATISTIC(NumReleasesBeforeOpt,
          "Number of releases before optimization");
STATISTIC(NumRetainsAfterOpt,
          "Number of retains after optimization");
STATISTIC(NumReleasesAfterOpt,
          "Number of releases after optimization");
#endif
namespace {

/// \brief A sequence of states that a pointer may go through in which an
/// objc_retain and objc_release are actually needed.
enum Sequence {
  S_None,
  S_Retain,        ///< objc_retain(x).
  S_CanRelease,    ///< foo(x) -- x could possibly see a ref count decrement.
  S_Use,           ///< any use of x.
  S_Stop,          ///< like S_Release, but code motion is stopped.
  S_Release,       ///< objc_release(x).
  S_MovableRelease ///< objc_release(x), !clang.imprecise_release.
};
raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
  LLVM_ATTRIBUTE_UNUSED;
raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
  switch (S) {
  case S_None:
    return OS << "S_None";
  case S_Retain:
    return OS << "S_Retain";
  case S_CanRelease:
    return OS << "S_CanRelease";
  case S_Use:
    return OS << "S_Use";
  case S_Release:
    return OS << "S_Release";
  case S_MovableRelease:
    return OS << "S_MovableRelease";
  case S_Stop:
    return OS << "S_Stop";
  }
  llvm_unreachable("Unknown sequence type.");
}

} // end anonymous namespace
static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
  // The easy cases.
  if (A == B)
    return A;
  if (A == S_None || B == S_None)
    return S_None;

  if (A > B) std::swap(A, B);
  if (TopDown) {
    // Choose the side which is further along in the sequence.
    if ((A == S_Retain || A == S_CanRelease) &&
        (B == S_CanRelease || B == S_Use))
      return B;
  } else {
    // Choose the side which is further along in the sequence.
    if ((A == S_Use || A == S_CanRelease) &&
        (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
      return A;
    // If both sides are releases, choose the more conservative one.
    if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
      return A;
    if (A == S_Release && B == S_MovableRelease)
      return A;
  }

  return S_None;
}
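// A worked example of the merge rules above (for exposition, not part of the
// original source): bottom up, merging S_Use with S_MovableRelease yields
// S_Use, the side further along in the sequence; merging S_Stop with
// S_Release yields S_Stop, the more conservative release; and merging any
// state with S_None yields S_None.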
namespace {

/// \brief Unidirectional information about either a
/// retain-decrement-use-release sequence or release-use-decrement-retain
/// reverse sequence.
struct RRInfo {
  /// After an objc_retain, the reference count of the referenced
  /// object is known to be positive. Similarly, before an objc_release, the
  /// reference count of the referenced object is known to be positive. If
  /// there are retain-release pairs in code regions where the retain count
  /// is known to be positive, they can be eliminated, regardless of any side
  /// effects between them.
  ///
  /// Also, a retain+release pair nested within another retain+release
  /// pair all on the same known pointer value can be eliminated, regardless
  /// of any intervening side effects.
  ///
  /// KnownSafe is true when either of these conditions is satisfied.
  bool KnownSafe;
  /// True if the objc_release calls are all marked with the "tail" keyword.
  bool IsTailCallRelease;

  /// If the Calls are objc_release calls and they all have a
  /// clang.imprecise_release tag, this is the metadata tag.
  MDNode *ReleaseMetadata;

  /// For a top-down sequence, the set of objc_retains or
  /// objc_retainBlocks. For bottom-up, the set of objc_releases.
  SmallPtrSet<Instruction *, 2> Calls;

  /// The set of optimal insert positions for moving calls in the opposite
  /// sequence.
  SmallPtrSet<Instruction *, 2> ReverseInsertPts;

  /// If this is true, we cannot perform code motion but can still remove
  /// retain/release pairs.
  bool CFGHazardAfflicted;

  RRInfo() :
    KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(nullptr),
    CFGHazardAfflicted(false) {}

  void clear();

  /// Conservatively merge the two RRInfo. Returns true if a partial merge has
  /// occurred, false otherwise.
  bool Merge(const RRInfo &Other);
};

} // end anonymous namespace
void RRInfo::clear() {
  KnownSafe = false;
  IsTailCallRelease = false;
  ReleaseMetadata = nullptr;
  Calls.clear();
  ReverseInsertPts.clear();
  CFGHazardAfflicted = false;
}
bool RRInfo::Merge(const RRInfo &Other) {
  // Conservatively merge the ReleaseMetadata information.
  if (ReleaseMetadata != Other.ReleaseMetadata)
    ReleaseMetadata = nullptr;

  // Conservatively merge the boolean state.
  KnownSafe &= Other.KnownSafe;
  IsTailCallRelease &= Other.IsTailCallRelease;
  CFGHazardAfflicted |= Other.CFGHazardAfflicted;

  // Merge the call sets.
  Calls.insert(Other.Calls.begin(), Other.Calls.end());

  // Merge the insert point sets. If there are any differences,
  // that makes this a partial merge.
  bool Partial = ReverseInsertPts.size() != Other.ReverseInsertPts.size();
  for (Instruction *Inst : Other.ReverseInsertPts)
    Partial |= ReverseInsertPts.insert(Inst).second;
  return Partial;
}
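// A worked example of the partial-merge rule above (hypothetical, for
// illustration): if this RRInfo has ReverseInsertPts {A, B} and Other has
// {B, C}, the merged set {A, B, C} differs from both inputs, so Merge returns
// true and the caller records the state as Partial.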
namespace {

/// \brief This class summarizes several per-pointer runtime properties which
/// are propagated through the flow graph.
class PtrState {
  /// True if the reference count is known to be incremented.
  bool KnownPositiveRefCount;

  /// True if we've seen an opportunity for partial RR elimination, such as
  /// pushing calls into a CFG triangle or into one side of a CFG diamond.
  bool Partial;

  /// The current position in the sequence.
  unsigned char Seq : 8;

  /// Unidirectional information about the current sequence.
  RRInfo RRI;

public:
  PtrState() : KnownPositiveRefCount(false), Partial(false),
               Seq(S_None) {}
  bool IsKnownSafe() const {
    return RRI.KnownSafe;
  }

  void SetKnownSafe(const bool NewValue) {
    RRI.KnownSafe = NewValue;
  }

  bool IsTailCallRelease() const {
    return RRI.IsTailCallRelease;
  }

  void SetTailCallRelease(const bool NewValue) {
    RRI.IsTailCallRelease = NewValue;
  }

  bool IsTrackingImpreciseReleases() const {
    return RRI.ReleaseMetadata != nullptr;
  }

  const MDNode *GetReleaseMetadata() const {
    return RRI.ReleaseMetadata;
  }

  void SetReleaseMetadata(MDNode *NewValue) {
    RRI.ReleaseMetadata = NewValue;
  }

  bool IsCFGHazardAfflicted() const {
    return RRI.CFGHazardAfflicted;
  }

  void SetCFGHazardAfflicted(const bool NewValue) {
    RRI.CFGHazardAfflicted = NewValue;
  }

  void SetKnownPositiveRefCount() {
    DEBUG(dbgs() << "Setting Known Positive.\n");
    KnownPositiveRefCount = true;
  }

  void ClearKnownPositiveRefCount() {
    DEBUG(dbgs() << "Clearing Known Positive.\n");
    KnownPositiveRefCount = false;
  }

  bool HasKnownPositiveRefCount() const {
    return KnownPositiveRefCount;
  }

  void SetSeq(Sequence NewSeq) {
    DEBUG(dbgs() << "Old: " << GetSeq() << "; New: " << NewSeq << "\n");
    Seq = NewSeq;
  }

  Sequence GetSeq() const {
    return static_cast<Sequence>(Seq);
  }

  void ClearSequenceProgress() {
    ResetSequenceProgress(S_None);
  }

  void ResetSequenceProgress(Sequence NewSeq) {
    DEBUG(dbgs() << "Resetting sequence progress.\n");
    SetSeq(NewSeq);
    Partial = false;
    RRI.clear();
  }
  void Merge(const PtrState &Other, bool TopDown);

  void InsertCall(Instruction *I) {
    RRI.Calls.insert(I);
  }

  void InsertReverseInsertPt(Instruction *I) {
    RRI.ReverseInsertPts.insert(I);
  }

  void ClearReverseInsertPts() {
    RRI.ReverseInsertPts.clear();
  }

  bool HasReverseInsertPts() const {
    return !RRI.ReverseInsertPts.empty();
  }

  const RRInfo &GetRRInfo() const {
    return RRI;
  }
};

} // end anonymous namespace
void PtrState::Merge(const PtrState &Other, bool TopDown) {
  Seq = MergeSeqs(GetSeq(), Other.GetSeq(), TopDown);
  KnownPositiveRefCount &= Other.KnownPositiveRefCount;

  // If we're not in a sequence (anymore), drop all associated state.
  if (Seq == S_None) {
    Partial = false;
    RRI.clear();
  } else if (Partial || Other.Partial) {
    // If we're doing a merge on a path that's previously seen a partial
    // merge, conservatively drop the sequence, to avoid doing partial
    // RR elimination. If the branch predicates for the two merges differ,
    // mixing them is unsafe.
    ClearSequenceProgress();
  } else {
    // Otherwise merge the other PtrState's RRInfo into our RRInfo. At this
    // point, we know that currently we are not partial. Stash whether or not
    // the merge operation caused us to undergo a partial merging of reverse
    // insertion points.
    Partial = RRI.Merge(Other.RRI);
  }
}
namespace {

/// \brief Per-BasicBlock state.
class BBState {
  /// The number of unique control paths from the entry which can reach this
  /// block.
  unsigned TopDownPathCount;

  /// The number of unique control paths to exits from this block.
  unsigned BottomUpPathCount;

  /// A type for PerPtrTopDown and PerPtrBottomUp.
  typedef MapVector<const Value *, PtrState> MapTy;

  /// The top-down traversal uses this to record information known about a
  /// pointer at the bottom of each block.
  MapTy PerPtrTopDown;

  /// The bottom-up traversal uses this to record information known about a
  /// pointer at the top of each block.
  MapTy PerPtrBottomUp;

  /// Effective predecessors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Preds;
  /// Effective successors of the current block ignoring ignorable edges and
  /// ignored backedges.
  SmallVector<BasicBlock *, 2> Succs;

public:
  static const unsigned OverflowOccurredValue;

  BBState() : TopDownPathCount(0), BottomUpPathCount(0) { }
  typedef MapTy::iterator ptr_iterator;
  typedef MapTy::const_iterator ptr_const_iterator;

  ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
  ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
  ptr_const_iterator top_down_ptr_begin() const {
    return PerPtrTopDown.begin();
  }
  ptr_const_iterator top_down_ptr_end() const {
    return PerPtrTopDown.end();
  }

  ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
  ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
  ptr_const_iterator bottom_up_ptr_begin() const {
    return PerPtrBottomUp.begin();
  }
  ptr_const_iterator bottom_up_ptr_end() const {
    return PerPtrBottomUp.end();
  }
  /// Mark this block as being an entry block, which has one path from the
  /// entry by definition.
  void SetAsEntry() { TopDownPathCount = 1; }

  /// Mark this block as being an exit block, which has one path to an exit
  /// by definition.
  void SetAsExit() { BottomUpPathCount = 1; }

  /// Attempt to find the PtrState object describing the top down state for
  /// pointer Arg. Return a new initialized PtrState describing the top down
  /// state for Arg if we do not find one.
  PtrState &getPtrTopDownState(const Value *Arg) {
    return PerPtrTopDown[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg. Return a new initialized PtrState describing the bottom up
  /// state for Arg if we do not find one.
  PtrState &getPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp[Arg];
  }

  /// Attempt to find the PtrState object describing the bottom up state for
  /// pointer Arg.
  ptr_iterator findPtrBottomUpState(const Value *Arg) {
    return PerPtrBottomUp.find(Arg);
  }

  void clearBottomUpPointers() {
    PerPtrBottomUp.clear();
  }

  void clearTopDownPointers() {
    PerPtrTopDown.clear();
  }
  void InitFromPred(const BBState &Other);
  void InitFromSucc(const BBState &Other);
  void MergePred(const BBState &Other);
  void MergeSucc(const BBState &Other);
  /// Compute the number of possible unique paths from an entry to an exit
  /// which pass through this block. This is only valid after both the
  /// top-down and bottom-up traversals are complete.
  ///
  /// Returns true if overflow occurred. Returns false if overflow did not
  /// occur.
  bool GetAllPathCountWithOverflow(unsigned &PathCount) const {
    if (TopDownPathCount == OverflowOccurredValue ||
        BottomUpPathCount == OverflowOccurredValue)
      return true;
    unsigned long long Product =
      (unsigned long long)TopDownPathCount*BottomUpPathCount;
    // Overflow occurred if any of the upper bits of Product are set or if
    // all of the lower bits of Product are set.
    return (Product >> 32) ||
           ((PathCount = Product) == OverflowOccurredValue);
  }
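  // Worked example (illustrative, not from the original source): with
  // TopDownPathCount == 0x10000 and BottomUpPathCount == 0x10000, Product is
  // 1ULL << 32, so (Product >> 32) is nonzero and overflow is reported. The
  // second clause catches the in-range product 0xffffffff, which would
  // otherwise collide with OverflowOccurredValue.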
  // Specialized CFG utilities.
  typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
  edge_iterator pred_begin() const { return Preds.begin(); }
  edge_iterator pred_end() const { return Preds.end(); }
  edge_iterator succ_begin() const { return Succs.begin(); }
  edge_iterator succ_end() const { return Succs.end(); }

  void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
  void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

  bool isExit() const { return Succs.empty(); }
};

} // end anonymous namespace

const unsigned BBState::OverflowOccurredValue = 0xffffffff;
void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}
/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  if (TopDownPathCount == OverflowOccurredValue)
    return;

  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // In order to be consistent, we clear the top-down pointers if adding
  // Other.TopDownPathCount makes TopDownPathCount equal to
  // OverflowOccurredValue, even though "true" overflow has not occurred.
  if (TopDownPathCount == OverflowOccurredValue) {
    clearTopDownPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    TopDownPathCount = OverflowOccurredValue;
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}
/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  if (BottomUpPathCount == OverflowOccurredValue)
    return;

  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // In order to be consistent, we clear the bottom-up pointers if adding
  // Other.BottomUpPathCount makes BottomUpPathCount equal to
  // OverflowOccurredValue, even though "true" overflow has not occurred.
  if (BottomUpPathCount == OverflowOccurredValue) {
    clearBottomUpPointers();
    return;
  }

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    BottomUpPathCount = OverflowOccurredValue;
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}
// Only enable ARC Annotations if we are building a debug version of
// the compiler.
#ifndef NDEBUG
#define ARC_ANNOTATIONS
#endif
// Define some macros along the lines of DEBUG and some helper functions to make
// it cleaner to create annotations in the source code and to no-op when not
// building in debug mode.
#ifdef ARC_ANNOTATIONS

#include "llvm/Support/CommandLine.h"
/// Enable/disable ARC sequence annotations.
static cl::opt<bool>
EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false),
                     cl::desc("Enable emission of arc data flow analysis "
                              "annotations"));
static cl::opt<bool>
DisableCheckForCFGHazards("disable-objc-arc-checkforcfghazards", cl::init(false),
                          cl::desc("Disable check for cfg hazards when "
                                   "annotations are enabled"));
static cl::opt<std::string>
ARCAnnotationTargetIdentifier("objc-arc-annotation-target-identifier",
                              cl::init(""),
                              cl::desc("filter out all data flow annotations "
                                       "but those that apply to the given "
                                       "target llvm identifier."));
/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
/// instruction so that we can track backwards when post processing via the llvm
/// arc annotation processor tool. If the pointer is an argument, we instead
/// hash the name of its parent function together with the argument's name.
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
                                         Value *Ptr) {
  MDString *Hash = nullptr;

  // If pointer is a result of an instruction and it does not have a source
  // MDNode attached to it, attach a new MDNode onto it. If pointer is a result
  // of an instruction and does have a source MDNode attached to it, return a
  // reference to said Node. Otherwise just return 0.
  if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
    MDNode *Node;
    if (!(Node = Inst->getMetadata(NodeId))) {
      // We do not have any node. Generate and attach the hash MDString to the
      // instruction.

      // We just use an MDString to ensure that this metadata gets written out
      // of line at the module level and to provide a very simple format
      // encoding the information herein. Both of these make it simpler to
      // parse the annotations with a simple external program.
      std::string Str;
      raw_string_ostream os(Str);
      os << "(" << Inst->getParent()->getParent()->getName() << ",%"
         << Inst->getName() << ")";

      Hash = MDString::get(Inst->getContext(), os.str());
      Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(),Hash));
    } else {
      // We have a node. Grab its hash and return it.
      assert(Node->getNumOperands() == 1 &&
             "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
      Hash = cast<MDString>(Node->getOperand(0));
    }
  } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    std::string str;
    raw_string_ostream os(str);
    os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
       << ")";
    Hash = MDString::get(Arg->getContext(), os.str());
  }

  return Hash;
}
static std::string SequenceToString(Sequence A) {
  std::string str;
  raw_string_ostream os(str);
  os << A;
  return os.str();
}

/// Helper function to change a Sequence into a String object using our overload
/// for raw_ostream so we only have printing code in one location.
static MDString *SequenceToMDString(LLVMContext &Context,
                                    Sequence A) {
  return MDString::get(Context, SequenceToString(A));
}
/// A simple function to generate a MDNode which describes the change in state
/// for Value *Ptr caused by Instruction *Inst.
static void AppendMDNodeToInstForPtr(unsigned NodeId,
                                     Instruction *Inst,
                                     Value *Ptr,
                                     MDString *PtrSourceMDNodeID,
                                     Sequence OldSeq,
                                     Sequence NewSeq) {
  MDNode *Node = nullptr;
  Metadata *tmp[3] = {PtrSourceMDNodeID,
                      SequenceToMDString(Inst->getContext(), OldSeq),
                      SequenceToMDString(Inst->getContext(), NewSeq)};
  Node = MDNode::get(Inst->getContext(), tmp);

  Inst->setMetadata(NodeId, Node);
}
/// Add to the beginning of the basic block llvm.ptr.annotations which show the
/// state of a pointer at the entrance to a basic block.
static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
                                            Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before
  // emitting an annotation.
  if(!ARCAnnotationTargetIdentifier.empty() &&
     !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), Params,
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, BB->getFirstInsertionPt());

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_str");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_str");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}
/// Add to the end of the basic block llvm.ptr.annotations which show the state
/// of the pointer at the bottom of the basic block.
static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
                                              Value *Ptr, Sequence Seq) {
  // If we have a target identifier, make sure that we match it before emitting
  // an annotation.
  if(!ARCAnnotationTargetIdentifier.empty() &&
     !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
    return;

  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C), Params,
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, std::prev(BB->end()));

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (nullptr == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_str");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (nullptr == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_str");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }
  Builder.CreateCall2(Callee, PtrName, S);
}
/// Adds a source annotation to pointer and a state change annotation to Inst
/// referencing the source annotation and the old/new state of pointer.
static void GenerateARCAnnotation(unsigned InstMDId,
                                  unsigned PtrMDId,
                                  Instruction *Inst,
                                  Value *Ptr,
                                  Sequence OldSeq,
                                  Sequence NewSeq) {
  if (EnableARCAnnotations) {
    // If we have a target identifier, make sure that we match it before
    // emitting an annotation.
    if(!ARCAnnotationTargetIdentifier.empty() &&
       !Ptr->getName().equals(ARCAnnotationTargetIdentifier))
      return;

    // First generate the source annotation on our pointer. This will return an
    // MDString* if Ptr actually comes from an instruction implying we can put
    // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
    // then we know that our pointer is from an Argument so we put a reference
    // to the argument number.
    //
    // The point of this is to make it easy for the
    // llvm-arc-annotation-processor tool to cross reference where the source
    // pointer is in the LLVM IR since the LLVM IR parser does not submit such
    // information via debug info for backends to use (since why would anyone
    // need such a thing from LLVM IR besides in non-standard cases
    // [i.e. this]).
    MDString *SourcePtrMDNode =
      AppendMDNodeToSourcePtr(PtrMDId, Ptr);
    AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
                             NewSeq);
  }
}
// The actual interface for accessing the above functionality is defined via
// some simple macros which are defined below. We do this so that the user does
// not need to pass in what metadata id is needed, resulting in cleaner code,
// and because it provides an easy way to conditionally no-op all annotation
// support in a non-debug build.

/// Use this macro to annotate a sequence state change when processing
/// instructions bottom up.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new) \
  GenerateARCAnnotation(ARCAnnotationBottomUpMDKind, \
                        ARCAnnotationProvenanceSourceMDKind, (inst), \
                        const_cast<Value*>(ptr), (old), (new))
/// Use this macro to annotate a sequence state change when processing
/// instructions top down.
#define ANNOTATE_TOPDOWN(inst, ptr, old, new) \
  GenerateARCAnnotation(ARCAnnotationTopDownMDKind, \
                        ARCAnnotationProvenanceSourceMDKind, (inst), \
                        const_cast<Value*>(ptr), (old), (new))
#define ANNOTATE_BB(_states, _bb, _name, _type, _direction)                   \
  do {                                                                        \
    if (EnableARCAnnotations) {                                               \
      for(BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(), \
          E = (_states)._direction##_ptr_end(); I != E; ++I) {                \
        Value *Ptr = const_cast<Value*>(I->first);                            \
        Sequence Seq = I->second.GetSeq();                                    \
        GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq);         \
      }                                                                       \
    }                                                                         \
  } while (0)
#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock)                       \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart",   \
              Entrance, bottom_up)
#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock)                         \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend",     \
              Terminator, bottom_up)
#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock)                        \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart",    \
              Entrance, top_down)
#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock)                          \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend",      \
              Terminator, top_down)
#else // !ARC_ANNOTATION

// If annotations are off, noop.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)

#endif // !ARC_ANNOTATION
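// Illustrative usage (assumed; it mirrors the call sites later in this file):
// when a bottom-up visit moves pointer Ptr from S_Release to S_Stop at
// instruction Inst, the transition is recorded with
//
//   ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
//
// which expands to a GenerateARCAnnotation call in annotation-enabled builds
// and to nothing otherwise.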
namespace {

/// \brief The main ARC optimization pass.
class ObjCARCOpt : public FunctionPass {
  bool Changed;
  ProvenanceAnalysis PA;
  ARCRuntimeEntryPoints EP;

  // This is used to track if a pointer is stored into an alloca.
  DenseSet<const Value *> MultiOwnersSet;

  /// A flag indicating whether this optimization pass should run.
  bool Run;

  /// Flags which determine whether each of the interesting runtime functions
  /// is in fact used in the current function.
  unsigned UsedInThisFunction;

  /// The Metadata Kind for clang.imprecise_release metadata.
  unsigned ImpreciseReleaseMDKind;

  /// The Metadata Kind for clang.arc.copy_on_escape metadata.
  unsigned CopyOnEscapeMDKind;

  /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
  unsigned NoObjCARCExceptionsMDKind;

#ifdef ARC_ANNOTATIONS
  /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
  unsigned ARCAnnotationBottomUpMDKind;
  /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
  unsigned ARCAnnotationTopDownMDKind;
  /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
  unsigned ARCAnnotationProvenanceSourceMDKind;
#endif // ARC_ANNOTATIONS
  bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
  void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                 ARCInstKind &Class);
  void OptimizeIndividualCalls(Function &F);

  void CheckForCFGHazards(const BasicBlock *BB,
                          DenseMap<const BasicBlock *, BBState> &BBStates,
                          BBState &MyStates) const;
  bool VisitInstructionBottomUp(Instruction *Inst,
                                BasicBlock *BB,
                                MapVector<Value *, RRInfo> &Retains,
                                BBState &MyStates);
  bool VisitBottomUp(BasicBlock *BB,
                     DenseMap<const BasicBlock *, BBState> &BBStates,
                     MapVector<Value *, RRInfo> &Retains);
  bool VisitInstructionTopDown(Instruction *Inst,
                               DenseMap<Value *, RRInfo> &Releases,
                               BBState &MyStates);
  bool VisitTopDown(BasicBlock *BB,
                    DenseMap<const BasicBlock *, BBState> &BBStates,
                    DenseMap<Value *, RRInfo> &Releases);
  bool Visit(Function &F,
             DenseMap<const BasicBlock *, BBState> &BBStates,
             MapVector<Value *, RRInfo> &Retains,
             DenseMap<Value *, RRInfo> &Releases);

  void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                 MapVector<Value *, RRInfo> &Retains,
                 DenseMap<Value *, RRInfo> &Releases,
                 SmallVectorImpl<Instruction *> &DeadInsts,
                 Module *M);

  bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                             MapVector<Value *, RRInfo> &Retains,
                             DenseMap<Value *, RRInfo> &Releases,
                             Module *M,
                             SmallVectorImpl<Instruction *> &NewRetains,
                             SmallVectorImpl<Instruction *> &NewReleases,
                             SmallVectorImpl<Instruction *> &DeadInsts,
                             RRInfo &RetainsToMove,
                             RRInfo &ReleasesToMove,
                             Value *Arg,
                             bool KnownSafe,
                             bool &AnyPairsCompletelyEliminated);

  bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                            MapVector<Value *, RRInfo> &Retains,
                            DenseMap<Value *, RRInfo> &Releases,
                            Module *M);

  void OptimizeWeakCalls(Function &F);

  bool OptimizeSequences(Function &F);

  void OptimizeReturns(Function &F);

#ifndef NDEBUG
  void GatherStatistics(Function &F, bool AfterOptimization = false);
#endif

  void getAnalysisUsage(AnalysisUsage &AU) const override;
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  void releaseMemory() override;

public:
  static char ID;
  ObjCARCOpt() : FunctionPass(ID) {
    initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
  }
};

} // end anonymous namespace
char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}
void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}
/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetArgRCIdentityRoot(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicARCInstKind(I) == ARCInstKind::AutoreleaseRV &&
        GetArgRCIdentityRoot(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "Erasing autoreleaseRV,retainRV pair: " << *I << "\n"
                   << "Erasing " << *RetainRV << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it to a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "Old = " << *RetainRV << "\n");

  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
  cast<CallInst>(RetainRV)->setCalledFunction(NewDecl);

  DEBUG(dbgs() << "New = " << *RetainRV << "\n");

  return false;
}
/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F,
                                           Instruction *AutoreleaseRV,
                                           ARCInstKind &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetArgRCIdentityRoot(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (const User *U : Ptr->users()) {
      if (isa<ReturnInst>(U) || GetBasicARCInstKind(U) == ARCInstKind::RetainRV)
        return;
      if (isa<BitCastInst>(U))
        Users.push_back(U);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "Transforming objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "Old = " << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  Constant *NewDecl = EP.get(ARCRuntimeEntryPoints::EPT_Autorelease);
  AutoreleaseRVCI->setCalledFunction(NewDecl);
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = ARCInstKind::Autorelease;

  DEBUG(dbgs() << "New: " << *AutoreleaseRV << "\n");
}
/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeIndividualCalls ==\n");
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    ARCInstKind Class = GetBasicARCInstKind(Inst);

    DEBUG(dbgs() << "Visiting: Class: " << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case ARCInstKind::NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "Erasing no-op cast: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;
    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case ARCInstKind::StoreWeak:
    case ARCInstKind::LoadWeak:
    case ARCInstKind::LoadWeakRetained:
    case ARCInstKind::InitWeak:
    case ARCInstKind::DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                       "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::CopyWeak:
    case ARCInstKind::MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "A null pointer-to-weak-pointer is undefined behavior."
                       "\nOld = " << *CI << "\nNew = " << *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case ARCInstKind::RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case ARCInstKind::AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }

    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();

        Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
        CallInst *NewCall = CallInst::Create(Decl, Call->getArgOperand(0), "",
                                             Call);
        NewCall->setMetadata(ImpreciseReleaseMDKind, MDNode::get(C, None));

        DEBUG(dbgs() << "Replacing autorelease{,RV}(x) with objc_release(x) "
              "since x is otherwise unused.\nOld: " << *Call << "\nNew: "
              << *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = ARCInstKind::Release;
      }
    }
    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Adding tail keyword to function since it can never be "
                      "passed stack args: " << *Inst << "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Removing tail keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "Found no throw class. Setting nounwind on: " << *Inst
                   << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << unsigned(Class);
      continue;
    }

    const Value *Arg = GetArgRCIdentityRoot(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ARC calls with null are no-ops. Erasing: " << *Inst
                   << "\n");
      EraseInstruction(Inst);
      continue;
    }
    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << unsigned(Class);

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          GetRCIdentityRoot(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case ARCInstKind::Retain:
        case ARCInstKind::RetainBlock:
          // These can always be moved up.
          break;
        case ARCInstKind::Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case ARCInstKind::RetainRV:
        case ARCInstKind::AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }
        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              GetRCIdentityRoot(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "Cloning "
                    << *CInst << "\n"
                       "And inserting clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }
}
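// A hypothetical sketch of the PHI optimization above (illustrative IR, not
// from a test case): given
//
//   %p = phi i8* [ null, %bb1 ], [ %x, %bb2 ]
//   call void @objc_release(i8* %p)
//
// the release is a no-op along the %bb1 path, so a clone operating on %x is
// inserted at the end of %bb2 and the original call is erased.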
/// If we have a top down pointer in the S_Use state, make sure that there are
/// no CFG hazards by checking the states of various bottom up pointers.
static void CheckForUseCFGHazard(const Sequence SuccSSeq,
                                 const bool SuccSRRIKnownSafe,
                                 PtrState &S,
                                 bool &SomeSuccHasSame,
                                 bool &AllSuccsHaveSame,
                                 bool &NotAllSeqEqualButKnownSafe,
                                 bool &ShouldContinue) {
  switch (SuccSSeq) {
  case S_CanRelease: {
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe) {
      S.ClearSequenceProgress();
      break;
    }
    S.SetCFGHazardAfflicted(true);
    ShouldContinue = true;
    break;
  }
  case S_Use:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// If we have a Top Down pointer in the S_CanRelease state, make sure that
/// there are no CFG hazards by checking the states of various bottom up
/// pointers.
static void CheckForCanReleaseCFGHazard(const Sequence SuccSSeq,
                                        const bool SuccSRRIKnownSafe,
                                        PtrState &S,
                                        bool &SomeSuccHasSame,
                                        bool &AllSuccsHaveSame,
                                        bool &NotAllSeqEqualButKnownSafe) {
  switch (SuccSSeq) {
  case S_CanRelease:
    SomeSuccHasSame = true;
    break;
  case S_Stop:
  case S_Release:
  case S_MovableRelease:
  case S_Use:
    if (!S.IsKnownSafe() && !SuccSRRIKnownSafe)
      AllSuccsHaveSame = false;
    else
      NotAllSeqEqualButKnownSafe = true;
    break;
  case S_Retain:
    llvm_unreachable("bottom-up pointer in retain state!");
  case S_None:
    llvm_unreachable("This should have been handled earlier.");
  }
}
/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
       E = MyStates.top_down_ptr_end(); I != E; ++I) {
    PtrState &S = I->second;
    const Sequence Seq = I->second.GetSeq();

    // We only care about S_Retain, S_CanRelease, and S_Use.
    if (Seq == S_None)
      continue;

    // Make sure that if extra top down states are added in the future that this
    // code is updated to handle it.
    assert((Seq == S_Retain || Seq == S_CanRelease || Seq == S_Use) &&
           "Unknown top down sequence state.");

    const Value *Arg = I->first;
    const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
    bool SomeSuccHasSame = false;
    bool AllSuccsHaveSame = true;
    bool NotAllSeqEqualButKnownSafe = false;

    succ_const_iterator SI(TI), SE(TI, false);

    for (; SI != SE; ++SI) {
      // If VisitBottomUp has pointer information for this successor, take
      // what we know about it.
      const DenseMap<const BasicBlock *, BBState>::iterator BBI =
        BBStates.find(*SI);
      assert(BBI != BBStates.end());
      const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
      const Sequence SuccSSeq = SuccS.GetSeq();

      // If, bottom up, the pointer is in an S_None state, clear the sequence
      // progress: the sequence in the bottom up state finished, suggesting a
      // mismatch between retains and releases. This is true for all three
      // cases that we are handling here: S_Retain, S_Use, and S_CanRelease.
      if (SuccSSeq == S_None) {
        S.ClearSequenceProgress();
        continue;
      }

      // If we have S_Use or S_CanRelease, perform our CFG hazard checks.
      const bool SuccSRRIKnownSafe = SuccS.IsKnownSafe();

      // *NOTE* We do not use Seq from above here since we are allowing for
      // S.GetSeq() to change while we are visiting basic blocks.
      switch(S.GetSeq()) {
      case S_Use: {
        bool ShouldContinue = false;
        CheckForUseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S, SomeSuccHasSame,
                             AllSuccsHaveSame, NotAllSeqEqualButKnownSafe,
                             ShouldContinue);
        if (ShouldContinue)
          continue;
        break;
      }
      case S_CanRelease: {
        CheckForCanReleaseCFGHazard(SuccSSeq, SuccSRRIKnownSafe, S,
                                    SomeSuccHasSame, AllSuccsHaveSame,
                                    NotAllSeqEqualButKnownSafe);
        break;
      }
      case S_Retain:
      case S_None:
      case S_Stop:
      case S_Release:
      case S_MovableRelease:
        break;
      }
    }

    // If the state at the other end of any of the successor edges
    // matches the current state, require all edges to match. This
    // guards against loops in the middle of a sequence.
    if (SomeSuccHasSame && !AllSuccsHaveSame) {
      S.ClearSequenceProgress();
    } else if (NotAllSeqEqualButKnownSafe) {
      // If we would have cleared the state, were it not for the fact that we
      // are known safe, stop code motion. This is because whether or not it
      // is safe to remove RR pairs via KnownSafe is an orthogonal concept to
      // whether we are allowed to perform code motion.
      S.SetCFGHazardAfflicted(true);
    }
  }
}
bool
ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
                                     BasicBlock *BB,
                                     MapVector<Value *, RRInfo> &Retains,
                                     BBState &MyStates) {
  bool NestingDetected = false;
  ARCInstKind Class = GetARCInstKind(Inst);
  const Value *Arg = nullptr;

  DEBUG(dbgs() << "Class: " << Class << "\n");

  switch (Class) {
  case ARCInstKind::Release: {
    Arg = GetArgRCIdentityRoot(Inst);

    PtrState &S = MyStates.getPtrBottomUpState(Arg);

    // Check for two releases in a row on the same pointer. If we see one,
    // make a note, and we'll circle back to revisit it after we've
    // hopefully eliminated the second release, which may allow us to
    // eliminate the first release too.
    // Theoretically we could implement removal of nested retain+release
    // pairs by making PtrState hold a stack of states, but this is
    // simple and avoids adding overhead for the non-nested case.
    if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
      DEBUG(dbgs() << "Found nested releases (i.e. a release pair)\n");
      NestingDetected = true;
    }

    MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
    Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
    ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
    S.ResetSequenceProgress(NewSeq);
    S.SetReleaseMetadata(ReleaseMetadata);
    S.SetKnownSafe(S.HasKnownPositiveRefCount());
    S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
    S.InsertCall(Inst);
    S.SetKnownPositiveRefCount();
    break;
  }
  case ARCInstKind::RetainBlock:
    // In OptimizeIndividualCalls, we have strength reduced all optimizable
    // objc_retainBlocks to objc_retains. Thus at this point any
    // objc_retainBlocks that we see are not optimizable.
    break;
  case ARCInstKind::Retain:
  case ARCInstKind::RetainRV: {
    Arg = GetArgRCIdentityRoot(Inst);

    PtrState &S = MyStates.getPtrBottomUpState(Arg);
    S.SetKnownPositiveRefCount();

    Sequence OldSeq = S.GetSeq();
    switch (OldSeq) {
    case S_Stop:
    case S_Release:
    case S_MovableRelease:
    case S_Use:
      // If OldSeq is not S_Use or OldSeq is S_Use and we are tracking an
      // imprecise release, clear our reverse insertion points.
      if (OldSeq != S_Use || S.IsTrackingImpreciseReleases())
        S.ClearReverseInsertPts();
      // FALL THROUGH
    case S_CanRelease:
      // Don't do retain+release tracking for ARCInstKind::RetainRV, because
      // it's better to let it remain as the first instruction after a call.
      if (Class != ARCInstKind::RetainRV)
        Retains[Inst] = S.GetRRInfo();
      S.ClearSequenceProgress();
      break;
    case S_None:
      break;
    case S_Retain:
      llvm_unreachable("bottom-up pointer in retain state!");
    }
    ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
    // A retain moving bottom up can be a use.
    break;
  }
1788 case ARCInstKind::AutoreleasepoolPop:
1789 // Conservatively, clear MyStates for all known pointers.
1790 MyStates.clearBottomUpPointers();
1791 return NestingDetected;
1792 case ARCInstKind::AutoreleasepoolPush:
1793 case ARCInstKind::None:
1794 // These are irrelevant.
1795 return NestingDetected;
1796 case ARCInstKind::User:
1797 // If we have a store into an alloca of a pointer we are tracking, the
1798 // pointer has multiple owners implying that we must be more conservative.
// This comes up in the context of a pointer being ``KnownSafe''. When a
// block is being initialized, the frontend will emit the objc_retain on
// the original pointer and the release on the pointer loaded from the
// alloca. Through provenance analysis, the optimizer will realize that
// the two are related, but since we only require KnownSafe in one
// direction, it will match the inner retain on the original pointer with
// the guard release on the original pointer. This is fixed by ensuring
// that in the presence of allocas we only unconditionally remove pointers
// if both our retain and our release are KnownSafe.
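// Illustrative IR sketch (hypothetical, heavily simplified):
//
//   %x = call i8* @foo()
//   %slot = alloca i8*
//   %0 = call i8* @objc_retain(i8* %x)
//   store i8* %x, i8** %slot
//   %y = load i8** %slot
//   call void @objc_release(i8* %y)
//
// Provenance analysis relates %x and %y, so without the multi-owner
// bookkeeping below the pair could be removed when only one direction is
// KnownSafe.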
1809 if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
1810 if (AreAnyUnderlyingObjectsAnAlloca(SI->getPointerOperand())) {
1811 BBState::ptr_iterator I = MyStates.findPtrBottomUpState(
1812 GetRCIdentityRoot(SI->getValueOperand()));
1813 if (I != MyStates.bottom_up_ptr_end())
      MultiOwnersSet.insert(I->first);
  }
}
break;
default:
  break;
}
1822 // Consider any other possible effects of this instruction on each
1823 // pointer being tracked.
1824 for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
1825 ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
1826 const Value *Ptr = MI->first;
if (Ptr == Arg)
  continue; // Handled above.
1829 PtrState &S = MI->second;
1830 Sequence Seq = S.GetSeq();
1832 // Check for possible releases.
1833 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
             << "\n");
S.ClearKnownPositiveRefCount();
switch (Seq) {
case S_Use:
  S.SetSeq(S_CanRelease);
  ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
  continue;
case S_CanRelease:
case S_Release:
case S_MovableRelease:
case S_Stop:
case S_None:
  break;
case S_Retain:
  llvm_unreachable("bottom-up pointer in retain state!");
}
}
1853 // Check for possible direct uses.
switch (Seq) {
case S_Release:
case S_MovableRelease:
1857 if (CanUse(Inst, Ptr, PA, Class)) {
DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
             << "\n");
1860 assert(!S.HasReverseInsertPts());
1861 // If this is an invoke instruction, we're scanning it as part of
1862 // one of its successor blocks, since we can't insert code after it
1863 // in its own block, and we don't want to split critical edges.
1864 if (isa<InvokeInst>(Inst))
1865 S.InsertReverseInsertPt(BB->getFirstInsertionPt());
else
  S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
S.SetSeq(S_Use);
1869 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
1870 } else if (Seq == S_Release && IsUser(Class)) {
DEBUG(dbgs() << "PreciseReleaseUse: Seq: " << Seq << "; " << *Ptr
             << "\n");
1873 // Non-movable releases depend on any possible objc pointer use.
S.SetSeq(S_Stop);
ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
1876 assert(!S.HasReverseInsertPts());
1877 // As above; handle invoke specially.
1878 if (isa<InvokeInst>(Inst))
1879 S.InsertReverseInsertPt(BB->getFirstInsertionPt());
  else
    S.InsertReverseInsertPt(std::next(BasicBlock::iterator(Inst)));
  }
  break;
case S_Stop:
  if (CanUse(Inst, Ptr, PA, Class)) {
    DEBUG(dbgs() << "PreciseStopUse: Seq: " << Seq << "; " << *Ptr
                 << "\n");
    S.SetSeq(S_Use);
    ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
  }
  break;
case S_CanRelease:
case S_Use:
case S_None:
  break;
case S_Retain:
  llvm_unreachable("bottom-up pointer in retain state!");
}
}
1901 return NestingDetected;
bool
ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
1906 DenseMap<const BasicBlock *, BBState> &BBStates,
1907 MapVector<Value *, RRInfo> &Retains) {
1909 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitBottomUp ==\n");
1911 bool NestingDetected = false;
1912 BBState &MyStates = BBStates[BB];
1914 // Merge the states from each successor to compute the initial state
1915 // for the current block.
1916 BBState::edge_iterator SI(MyStates.succ_begin()),
1917 SE(MyStates.succ_end());
if (SI != SE) {
  const BasicBlock *Succ = *SI;
1920 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
1921 assert(I != BBStates.end());
1922 MyStates.InitFromSucc(I->second);
  ++SI;
  for (; SI != SE; ++SI) {
    Succ = *SI;
    I = BBStates.find(Succ);
1927 assert(I != BBStates.end());
    MyStates.MergeSucc(I->second);
  }
}
1932 // If ARC Annotations are enabled, output the current state of pointers at the
1933 // bottom of the basic block.
1934 ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
1936 // Visit all the instructions, bottom-up.
1937 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
1938 Instruction *Inst = std::prev(I);
1940 // Invoke instructions are visited as part of their successors (below).
if (isa<InvokeInst>(Inst))
  continue;
1944 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
1946 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
1949 // If there's a predecessor with an invoke, visit the invoke as if it were
1950 // part of this block, since we can't insert code after an invoke in its own
1951 // block, and we don't want to split critical edges.
1952 for (BBState::edge_iterator PI(MyStates.pred_begin()),
1953 PE(MyStates.pred_end()); PI != PE; ++PI) {
1954 BasicBlock *Pred = *PI;
1955 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
1956 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
1959 // If ARC Annotations are enabled, output the current state of pointers at the
1960 // top of the basic block.
1961 ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
1963 return NestingDetected;
bool
ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
1968 DenseMap<Value *, RRInfo> &Releases,
1969 BBState &MyStates) {
1970 bool NestingDetected = false;
1971 ARCInstKind Class = GetARCInstKind(Inst);
1972 const Value *Arg = nullptr;
switch (Class) {
case ARCInstKind::RetainBlock:
// In OptimizeIndividualCalls, we have strength-reduced all optimizable
// objc_retainBlock calls to objc_retain calls. Thus, any objc_retainBlock
// that we see at this point is not optimizable.
break;
1980 case ARCInstKind::Retain:
1981 case ARCInstKind::RetainRV: {
1982 Arg = GetArgRCIdentityRoot(Inst);
1984 PtrState &S = MyStates.getPtrTopDownState(Arg);
// Don't do retain+release tracking for ARCInstKind::RetainRV, because
// it's better to let it remain as the first instruction after a call.
1989 if (Class != ARCInstKind::RetainRV) {
// Check for a retain that immediately follows another retain on the
// same pointer. If we see one, make a note, and we'll circle back to
// revisit it after we've hopefully eliminated the second retain, which
// may allow us to eliminate the first retain too.
1994 // Theoretically we could implement removal of nested retain+release
1995 // pairs by making PtrState hold a stack of states, but this is
1996 // simple and avoids adding overhead for the non-nested case.
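// For example (illustrative IR only):
//
//   %0 = call i8* @objc_retain(i8* %x)
//   %1 = call i8* @objc_retain(i8* %x)
//
// Scanning top-down, the first retain puts %x in S_Retain; seeing the
// second retain while still in S_Retain is the nesting noted below.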
1997 if (S.GetSeq() == S_Retain)
1998 NestingDetected = true;
2000 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
2001 S.ResetSequenceProgress(S_Retain);
  S.SetKnownSafe(S.HasKnownPositiveRefCount());
  S.InsertCall(Inst);
}
2006 S.SetKnownPositiveRefCount();
// A retain can be a potential use; proceed to the generic checking
// code below.
break;
}
2012 case ARCInstKind::Release: {
2013 Arg = GetArgRCIdentityRoot(Inst);
2015 PtrState &S = MyStates.getPtrTopDownState(Arg);
2016 S.ClearKnownPositiveRefCount();
2018 Sequence OldSeq = S.GetSeq();
2020 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
switch (OldSeq) {
case S_Retain:
case S_CanRelease:
  if (OldSeq == S_Retain || ReleaseMetadata != nullptr)
    S.ClearReverseInsertPts();
  // FALL THROUGH
case S_Use:
  S.SetReleaseMetadata(ReleaseMetadata);
  S.SetTailCallRelease(cast<CallInst>(Inst)->isTailCall());
  Releases[Inst] = S.GetRRInfo();
  ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
  S.ClearSequenceProgress();
  break;
case S_None:
  break;
case S_Stop:
case S_Release:
case S_MovableRelease:
  llvm_unreachable("top-down pointer in release state!");
}
break;
}
2044 case ARCInstKind::AutoreleasepoolPop:
2045 // Conservatively, clear MyStates for all known pointers.
2046 MyStates.clearTopDownPointers();
2047 return NestingDetected;
2048 case ARCInstKind::AutoreleasepoolPush:
2049 case ARCInstKind::None:
2050 // These are irrelevant.
  return NestingDetected;
default:
  break;
}
2056 // Consider any other possible effects of this instruction on each
2057 // pointer being tracked.
2058 for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
2059 ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
2060 const Value *Ptr = MI->first;
if (Ptr == Arg)
  continue; // Handled above.
2063 PtrState &S = MI->second;
2064 Sequence Seq = S.GetSeq();
2066 // Check for possible releases.
2067 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
DEBUG(dbgs() << "CanAlterRefCount: Seq: " << Seq << "; " << *Ptr
             << "\n");
S.ClearKnownPositiveRefCount();
switch (Seq) {
case S_Retain:
  S.SetSeq(S_CanRelease);
  ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
  assert(!S.HasReverseInsertPts());
  S.InsertReverseInsertPt(Inst);

  // One call can't cause a transition from S_Retain to S_CanRelease
  // and S_CanRelease to S_Use. If we've made the first transition,
  // we're done.
  continue;
case S_Use:
case S_CanRelease:
case S_None:
  break;
case S_Stop:
case S_Release:
case S_MovableRelease:
  llvm_unreachable("top-down pointer in release state!");
}
}
2093 // Check for possible direct uses.
switch (Seq) {
case S_CanRelease:
  if (CanUse(Inst, Ptr, PA, Class)) {
    DEBUG(dbgs() << "CanUse: Seq: " << Seq << "; " << *Ptr
                 << "\n");
    S.SetSeq(S_Use);
    ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
  }
  break;
case S_Retain:
case S_Use:
case S_None:
  break;
case S_Stop:
case S_Release:
case S_MovableRelease:
  llvm_unreachable("top-down pointer in release state!");
}
}
2114 return NestingDetected;
bool
ObjCARCOpt::VisitTopDown(BasicBlock *BB,
2119 DenseMap<const BasicBlock *, BBState> &BBStates,
2120 DenseMap<Value *, RRInfo> &Releases) {
2121 DEBUG(dbgs() << "\n== ObjCARCOpt::VisitTopDown ==\n");
2122 bool NestingDetected = false;
2123 BBState &MyStates = BBStates[BB];
2125 // Merge the states from each predecessor to compute the initial state
2126 // for the current block.
2127 BBState::edge_iterator PI(MyStates.pred_begin()),
2128 PE(MyStates.pred_end());
if (PI != PE) {
  const BasicBlock *Pred = *PI;
2131 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
2132 assert(I != BBStates.end());
2133 MyStates.InitFromPred(I->second);
  ++PI;
  for (; PI != PE; ++PI) {
    Pred = *PI;
    I = BBStates.find(Pred);
2138 assert(I != BBStates.end());
    MyStates.MergePred(I->second);
  }
}
2143 // If ARC Annotations are enabled, output the current state of pointers at the
2144 // top of the basic block.
2145 ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
2147 // Visit all the instructions, top-down.
2148 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
2149 Instruction *Inst = I;
2151 DEBUG(dbgs() << "Visiting " << *Inst << "\n");
2153 NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
2156 // If ARC Annotations are enabled, output the current state of pointers at the
2157 // bottom of the basic block.
2158 ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
2160 #ifdef ARC_ANNOTATIONS
2161 if (!(EnableARCAnnotations && DisableCheckForCFGHazards))
#endif // ARC_ANNOTATIONS
  CheckForCFGHazards(BB, BBStates, MyStates);
2164 return NestingDetected;
static void
ComputePostOrders(Function &F,
2169 SmallVectorImpl<BasicBlock *> &PostOrder,
2170 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
2171 unsigned NoObjCARCExceptionsMDKind,
2172 DenseMap<const BasicBlock *, BBState> &BBStates) {
2173 /// The visited set, for doing DFS walks.
2174 SmallPtrSet<BasicBlock *, 16> Visited;
2176 // Do DFS, computing the PostOrder.
2177 SmallPtrSet<BasicBlock *, 16> OnStack;
2178 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
2180 // Functions always have exactly one entry block, and we don't have
2181 // any other block that we treat like an entry block.
2182 BasicBlock *EntryBB = &F.getEntryBlock();
2183 BBState &MyStates = BBStates[EntryBB];
2184 MyStates.SetAsEntry();
2185 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
2186 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
2187 Visited.insert(EntryBB);
2188 OnStack.insert(EntryBB);
do {
dfs_next_succ:
  BasicBlock *CurrBB = SuccStack.back().first;
2192 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
2193 succ_iterator SE(TI, false);
2195 while (SuccStack.back().second != SE) {
2196 BasicBlock *SuccBB = *SuccStack.back().second++;
2197 if (Visited.insert(SuccBB).second) {
2198 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
2199 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
2200 BBStates[CurrBB].addSucc(SuccBB);
2201 BBState &SuccStates = BBStates[SuccBB];
2202 SuccStates.addPred(CurrBB);
  OnStack.insert(SuccBB);
  goto dfs_next_succ;
}
2207 if (!OnStack.count(SuccBB)) {
2208 BBStates[CurrBB].addSucc(SuccBB);
    BBStates[SuccBB].addPred(CurrBB);
  }
}
2212 OnStack.erase(CurrBB);
2213 PostOrder.push_back(CurrBB);
2214 SuccStack.pop_back();
2215 } while (!SuccStack.empty());
2219 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
// Functions may have many exits, and there are also blocks which we treat
// as exits due to ignored edges.
2222 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
2223 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2224 BasicBlock *ExitBB = I;
2225 BBState &MyStates = BBStates[ExitBB];
if (!MyStates.isExit())
  continue;
2229 MyStates.SetAsExit();
2231 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
2232 Visited.insert(ExitBB);
2233 while (!PredStack.empty()) {
2234 reverse_dfs_next_succ:
2235 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
2236 while (PredStack.back().second != PE) {
2237 BasicBlock *BB = *PredStack.back().second++;
2238 if (Visited.insert(BB).second) {
2239 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
      goto reverse_dfs_next_succ;
    }
  }
2243 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
2248 // Visit the function both top-down and bottom-up.
bool
ObjCARCOpt::Visit(Function &F,
2251 DenseMap<const BasicBlock *, BBState> &BBStates,
2252 MapVector<Value *, RRInfo> &Retains,
2253 DenseMap<Value *, RRInfo> &Releases) {
2255 // Use reverse-postorder traversals, because we magically know that loops
2256 // will be well behaved, i.e. they won't repeatedly call retain on a single
2257 // pointer without doing a release. We can't use the ReversePostOrderTraversal
2258 // class here because we want the reverse-CFG postorder to consider each
2259 // function exit point, and we want to ignore selected cycle edges.
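// Concretely: iterating ReverseCFGPostOrder back-to-front yields a
// reverse-postorder of the reversed CFG (used by the bottom-up visit),
// and iterating PostOrder back-to-front yields a reverse-postorder of the
// CFG itself (used by the top-down visit).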
2260 SmallVector<BasicBlock *, 16> PostOrder;
2261 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
2262 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
                  NoObjCARCExceptionsMDKind,
                  BBStates);
2266 // Use reverse-postorder on the reverse CFG for bottom-up.
2267 bool BottomUpNestingDetected = false;
2268 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2269 ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
     I != E; ++I)
  BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
2273 // Use reverse-postorder for top-down.
2274 bool TopDownNestingDetected = false;
2275 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2276 PostOrder.rbegin(), E = PostOrder.rend();
     I != E; ++I)
  TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
2280 return TopDownNestingDetected && BottomUpNestingDetected;
2283 /// Move the calls in RetainsToMove and ReleasesToMove.
2284 void ObjCARCOpt::MoveCalls(Value *Arg,
2285 RRInfo &RetainsToMove,
2286 RRInfo &ReleasesToMove,
2287 MapVector<Value *, RRInfo> &Retains,
2288 DenseMap<Value *, RRInfo> &Releases,
2289 SmallVectorImpl<Instruction *> &DeadInsts,
                           Module *M) {
  Type *ArgTy = Arg->getType();
2292 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
2294 DEBUG(dbgs() << "== ObjCARCOpt::MoveCalls ==\n");
2296 // Insert the new retain and release calls.
2297 for (Instruction *InsertPt : ReleasesToMove.ReverseInsertPts) {
2298 Value *MyArg = ArgTy == ParamTy ? Arg :
2299 new BitCastInst(Arg, ParamTy, "", InsertPt);
2300 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
2301 CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
2302 Call->setDoesNotThrow();
2303 Call->setTailCall();
2305 DEBUG(dbgs() << "Inserting new Retain: " << *Call << "\n"
2306 "At insertion point: " << *InsertPt << "\n");
2308 for (Instruction *InsertPt : RetainsToMove.ReverseInsertPts) {
2309 Value *MyArg = ArgTy == ParamTy ? Arg :
2310 new BitCastInst(Arg, ParamTy, "", InsertPt);
2311 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Release);
2312 CallInst *Call = CallInst::Create(Decl, MyArg, "", InsertPt);
2313 // Attach a clang.imprecise_release metadata tag, if appropriate.
2314 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
2315 Call->setMetadata(ImpreciseReleaseMDKind, M);
2316 Call->setDoesNotThrow();
2317 if (ReleasesToMove.IsTailCallRelease)
2318 Call->setTailCall();
2320 DEBUG(dbgs() << "Inserting new Release: " << *Call << "\n"
2321 "At insertion point: " << *InsertPt << "\n");
2324 // Delete the original retain and release calls.
2325 for (Instruction *OrigRetain : RetainsToMove.Calls) {
2326 Retains.blot(OrigRetain);
2327 DeadInsts.push_back(OrigRetain);
2328 DEBUG(dbgs() << "Deleting retain: " << *OrigRetain << "\n");
2330 for (Instruction *OrigRelease : ReleasesToMove.Calls) {
2331 Releases.erase(OrigRelease);
2332 DeadInsts.push_back(OrigRelease);
2333 DEBUG(dbgs() << "Deleting release: " << *OrigRelease << "\n");
bool
ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
                                      &BBStates,
                                  MapVector<Value *, RRInfo> &Retains,
                                  DenseMap<Value *, RRInfo> &Releases,
                                  Module *M,
                                  SmallVectorImpl<Instruction *> &NewRetains,
2345 SmallVectorImpl<Instruction *> &NewReleases,
2346 SmallVectorImpl<Instruction *> &DeadInsts,
2347 RRInfo &RetainsToMove,
2348 RRInfo &ReleasesToMove,
                                  Value *Arg,
                                  bool KnownSafe,
                                  bool &AnyPairsCompletelyEliminated) {
2352 // If a pair happens in a region where it is known that the reference count
2353 // is already incremented, we can similarly ignore possible decrements unless
2354 // we are dealing with a retainable object with multiple provenance sources.
2355 bool KnownSafeTD = true, KnownSafeBU = true;
2356 bool MultipleOwners = false;
2357 bool CFGHazardAfflicted = false;
2359 // Connect the dots between the top-down-collected RetainsToMove and
2360 // bottom-up-collected ReleasesToMove to form sets of related calls.
2361 // This is an iterative process so that we connect multiple releases
2362 // to multiple retains if needed.
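// Roughly: OldDelta and NewDelta track the retain-vs-release balance,
// weighted by CFG path counts, at the original call sites and at the
// proposed insertion points respectively; both must net to zero below
// for the rewrite to preserve the program's retain/release balance.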
2363 unsigned OldDelta = 0;
2364 unsigned NewDelta = 0;
2365 unsigned OldCount = 0;
2366 unsigned NewCount = 0;
2367 bool FirstRelease = true;
for (;;) {
  for (SmallVectorImpl<Instruction *>::const_iterator
2370 NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
2371 Instruction *NewRetain = *NI;
2372 MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
2373 assert(It != Retains.end());
2374 const RRInfo &NewRetainRRI = It->second;
2375 KnownSafeTD &= NewRetainRRI.KnownSafe;
MultipleOwners =
    MultipleOwners || MultiOwnersSet.count(GetArgRCIdentityRoot(NewRetain));
2378 for (Instruction *NewRetainRelease : NewRetainRRI.Calls) {
2379 DenseMap<Value *, RRInfo>::const_iterator Jt =
2380 Releases.find(NewRetainRelease);
if (Jt == Releases.end())
  return false;
2383 const RRInfo &NewRetainReleaseRRI = Jt->second;
2385 // If the release does not have a reference to the retain as well,
2386 // something happened which is unaccounted for. Do not do anything.
// This can happen if we catch an additive overflow during path count
// merging.
if (!NewRetainReleaseRRI.Calls.count(NewRetain))
  return false;
2393 if (ReleasesToMove.Calls.insert(NewRetainRelease).second) {
// If we overflow when we compute the path count, don't remove/move
// anything.
2397 const BBState &NRRBBState = BBStates[NewRetainRelease->getParent()];
2398 unsigned PathCount = BBState::OverflowOccurredValue;
if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
  return false;
2401 assert(PathCount != BBState::OverflowOccurredValue &&
2402 "PathCount at this point can not be "
2403 "OverflowOccurredValue.");
2404 OldDelta -= PathCount;
2406 // Merge the ReleaseMetadata and IsTailCallRelease values.
if (FirstRelease) {
  ReleasesToMove.ReleaseMetadata =
2409 NewRetainReleaseRRI.ReleaseMetadata;
2410 ReleasesToMove.IsTailCallRelease =
2411 NewRetainReleaseRRI.IsTailCallRelease;
  FirstRelease = false;
} else {
  if (ReleasesToMove.ReleaseMetadata !=
2415 NewRetainReleaseRRI.ReleaseMetadata)
2416 ReleasesToMove.ReleaseMetadata = nullptr;
2417 if (ReleasesToMove.IsTailCallRelease !=
2418 NewRetainReleaseRRI.IsTailCallRelease)
    ReleasesToMove.IsTailCallRelease = false;
}
2422 // Collect the optimal insertion points.
if (!KnownSafe)
  for (Instruction *RIP : NewRetainReleaseRRI.ReverseInsertPts) {
2425 if (ReleasesToMove.ReverseInsertPts.insert(RIP).second) {
2426 // If we overflow when we compute the path count, don't
2427 // remove/move anything.
2428 const BBState &RIPBBState = BBStates[RIP->getParent()];
2429 PathCount = BBState::OverflowOccurredValue;
if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
  return false;
2432 assert(PathCount != BBState::OverflowOccurredValue &&
2433 "PathCount at this point can not be "
2434 "OverflowOccurredValue.");
        NewDelta -= PathCount;
      }
    }
  NewReleases.push_back(NewRetainRelease);
  }
}
}
NewRetains.clear();
2443 if (NewReleases.empty()) break;
2445 // Back the other way.
2446 for (SmallVectorImpl<Instruction *>::const_iterator
2447 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
2448 Instruction *NewRelease = *NI;
2449 DenseMap<Value *, RRInfo>::const_iterator It =
2450 Releases.find(NewRelease);
2451 assert(It != Releases.end());
2452 const RRInfo &NewReleaseRRI = It->second;
2453 KnownSafeBU &= NewReleaseRRI.KnownSafe;
2454 CFGHazardAfflicted |= NewReleaseRRI.CFGHazardAfflicted;
2455 for (Instruction *NewReleaseRetain : NewReleaseRRI.Calls) {
2456 MapVector<Value *, RRInfo>::const_iterator Jt =
2457 Retains.find(NewReleaseRetain);
if (Jt == Retains.end())
  return false;
2460 const RRInfo &NewReleaseRetainRRI = Jt->second;
2462 // If the retain does not have a reference to the release as well,
2463 // something happened which is unaccounted for. Do not do anything.
// This can happen if we catch an additive overflow during path count
// merging.
if (!NewReleaseRetainRRI.Calls.count(NewRelease))
  return false;
2470 if (RetainsToMove.Calls.insert(NewReleaseRetain).second) {
// If we overflow when we compute the path count, don't remove/move
// anything.
2473 const BBState &NRRBBState = BBStates[NewReleaseRetain->getParent()];
2474 unsigned PathCount = BBState::OverflowOccurredValue;
if (NRRBBState.GetAllPathCountWithOverflow(PathCount))
  return false;
2477 assert(PathCount != BBState::OverflowOccurredValue &&
2478 "PathCount at this point can not be "
2479 "OverflowOccurredValue.");
2480 OldDelta += PathCount;
2481 OldCount += PathCount;
2483 // Collect the optimal insertion points.
if (!KnownSafe)
  for (Instruction *RIP : NewReleaseRetainRRI.ReverseInsertPts) {
2486 if (RetainsToMove.ReverseInsertPts.insert(RIP).second) {
2487 // If we overflow when we compute the path count, don't
2488 // remove/move anything.
2489 const BBState &RIPBBState = BBStates[RIP->getParent()];
2491 PathCount = BBState::OverflowOccurredValue;
if (RIPBBState.GetAllPathCountWithOverflow(PathCount))
  return false;
2494 assert(PathCount != BBState::OverflowOccurredValue &&
2495 "PathCount at this point can not be "
2496 "OverflowOccurredValue.");
2497 NewDelta += PathCount;
        NewCount += PathCount;
      }
    }
  NewRetains.push_back(NewReleaseRetain);
  }
}
}
2505 NewReleases.clear();
  if (NewRetains.empty()) break;
}
// If the pointer is known incremented in one direction and we do not have
// MultipleOwners, we can safely remove the retain/release pairs. Otherwise
// we need to be KnownSafe in both directions.
2512 bool UnconditionallySafe = (KnownSafeTD && KnownSafeBU) ||
2513 ((KnownSafeTD || KnownSafeBU) && !MultipleOwners);
2514 if (UnconditionallySafe) {
2515 RetainsToMove.ReverseInsertPts.clear();
2516 ReleasesToMove.ReverseInsertPts.clear();
2519 // Determine whether the new insertion points we computed preserve the
2520 // balance of retain and release calls through the program.
2521 // TODO: If the fully aggressive solution isn't valid, try to find a
// less aggressive solution which is.
if (NewDelta != 0)
  return false;
2526 // At this point, we are not going to remove any RR pairs, but we still are
2527 // able to move RR pairs. If one of our pointers is afflicted with
2528 // CFGHazards, we cannot perform such code motion so exit early.
2529 const bool WillPerformCodeMotion = RetainsToMove.ReverseInsertPts.size() ||
2530 ReleasesToMove.ReverseInsertPts.size();
if (CFGHazardAfflicted && WillPerformCodeMotion)
  return false;
2535 // Determine whether the original call points are balanced in the retain and
// release calls through the program. If not, conservatively don't touch
// them.
2538 // TODO: It's theoretically possible to do code motion in this case, as
// long as the existing imbalances are maintained.
if (OldDelta != 0)
  return false;
2543 #ifdef ARC_ANNOTATIONS
2544 // Do not move calls if ARC annotations are requested.
if (EnableARCAnnotations)
  return false;
2547 #endif // ARC_ANNOTATIONS
Changed = true;
assert(OldCount != 0 && "Unreachable code?");
2551 NumRRs += OldCount - NewCount;
2552 // Set to true if we completely removed any RR pairs.
2553 AnyPairsCompletelyEliminated = NewCount == 0;
// We can move calls!
return true;
}
/// Identify pairings between the retains and releases, and delete and/or move
/// them.
bool
ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
                                     &BBStates,
                                 MapVector<Value *, RRInfo> &Retains,
2565 DenseMap<Value *, RRInfo> &Releases,
                                 Module *M) {
  DEBUG(dbgs() << "\n== ObjCARCOpt::PerformCodePlacement ==\n");
2569 bool AnyPairsCompletelyEliminated = false;
2570 RRInfo RetainsToMove;
2571 RRInfo ReleasesToMove;
2572 SmallVector<Instruction *, 4> NewRetains;
2573 SmallVector<Instruction *, 4> NewReleases;
2574 SmallVector<Instruction *, 8> DeadInsts;
2576 // Visit each retain.
2577 for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2578 E = Retains.end(); I != E; ++I) {
2579 Value *V = I->first;
2580 if (!V) continue; // blotted
2582 Instruction *Retain = cast<Instruction>(V);
2584 DEBUG(dbgs() << "Visiting: " << *Retain << "\n");
2586 Value *Arg = GetArgRCIdentityRoot(Retain);
2588 // If the object being released is in static or stack storage, we know it's
2589 // not being managed by ObjC reference counting, so we can delete pairs
2590 // regardless of what possible decrements or uses lie between them.
2591 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2593 // A constant pointer can't be pointing to an object on the heap. It may
2594 // be reference-counted, but it won't be deleted.
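// For example (illustrative): a pointer loaded from a constant global,
// such as a compile-time constant string object, may be retained and
// released, but it is never deallocated, so decrements between the pair
// cannot free it.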
2595 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2596 if (const GlobalVariable *GV =
2597 dyn_cast<GlobalVariable>(
2598 GetRCIdentityRoot(LI->getPointerOperand())))
if (GV->isConstant())
  KnownSafe = true;
2602 // Connect the dots between the top-down-collected RetainsToMove and
2603 // bottom-up-collected ReleasesToMove to form sets of related calls.
2604 NewRetains.push_back(Retain);
2605 bool PerformMoveCalls =
2606 ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
2607 NewReleases, DeadInsts, RetainsToMove,
2608 ReleasesToMove, Arg, KnownSafe,
2609 AnyPairsCompletelyEliminated);
2611 if (PerformMoveCalls) {
// Ok, everything checks out and we're all set. Let's move/delete some
// code!
2614 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2615 Retains, Releases, DeadInsts, M);
2618 // Clean up state for next retain.
NewReleases.clear();
NewRetains.clear();
2621 RetainsToMove.clear();
2622 ReleasesToMove.clear();
2625 // Now that we're done moving everything, we can delete the newly dead
2626 // instructions, as we no longer need them as insert points.
2627 while (!DeadInsts.empty())
2628 EraseInstruction(DeadInsts.pop_back_val());
2630 return AnyPairsCompletelyEliminated;
2633 /// Weak pointer optimizations.
2634 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
2635 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeWeakCalls ==\n");
2637 // First, do memdep-style RLE and S2L optimizations. We can't use memdep
// itself because it uses AliasAnalysis and we need to do provenance
// queries instead.
2640 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2641 Instruction *Inst = &*I++;
2643 DEBUG(dbgs() << "Visiting: " << *Inst << "\n");
2645 ARCInstKind Class = GetBasicARCInstKind(Inst);
2646 if (Class != ARCInstKind::LoadWeak &&
    Class != ARCInstKind::LoadWeakRetained)
  continue;
2650 // Delete objc_loadWeak calls with no users.
2651 if (Class == ARCInstKind::LoadWeak && Inst->use_empty()) {
  Inst->eraseFromParent();
  continue;
}
2656 // TODO: For now, just look for an earlier available version of this value
2657 // within the same block. Theoretically, we could do memdep-style non-local
2658 // analysis too, but that would want caching. A better approach would be to
2659 // use the technique that EarlyCSE uses.
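// For example (illustrative IR only), with no intervening instruction
// that can write to %p:
//
//   %1 = call i8* @objc_loadWeak(i8** %p)
//   %2 = call i8* @objc_loadWeak(i8** %p)
//
// the second load is redundant and can be replaced with %1.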
2660 inst_iterator Current = std::prev(I);
2661 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
2662 for (BasicBlock::iterator B = CurrentBB->begin(),
2663 J = Current.getInstructionIterator();
     J != B; --J) {
  Instruction *EarlierInst = &*std::prev(J);
2666 ARCInstKind EarlierClass = GetARCInstKind(EarlierInst);
2667 switch (EarlierClass) {
2668 case ARCInstKind::LoadWeak:
2669 case ARCInstKind::LoadWeakRetained: {
// If this is loading from the same pointer, replace this load's value
// with that one.
2672 CallInst *Call = cast<CallInst>(Inst);
2673 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2674 Value *Arg = Call->getArgOperand(0);
2675 Value *EarlierArg = EarlierCall->getArgOperand(0);
2676 switch (PA.getAA()->alias(Arg, EarlierArg)) {
case AliasAnalysis::MustAlias:
  Changed = true;
2679 // If the load has a builtin retain, insert a plain retain for it.
2680 if (Class == ARCInstKind::LoadWeakRetained) {
2681 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
  CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
  CI->setTailCall();
}
2685 // Zap the fully redundant load.
2686 Call->replaceAllUsesWith(EarlierCall);
  Call->eraseFromParent();
  goto clobbered;
case AliasAnalysis::MayAlias:
case AliasAnalysis::PartialAlias:
  goto clobbered;
case AliasAnalysis::NoAlias:
  break;
}
break;
}
2697 case ARCInstKind::StoreWeak:
2698 case ARCInstKind::InitWeak: {
2699 // If this is storing to the same pointer and has the same size etc.
2700 // replace this load's value with the stored value.
2701 CallInst *Call = cast<CallInst>(Inst);
2702 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2703 Value *Arg = Call->getArgOperand(0);
2704 Value *EarlierArg = EarlierCall->getArgOperand(0);
2705 switch (PA.getAA()->alias(Arg, EarlierArg)) {
case AliasAnalysis::MustAlias:
  Changed = true;
2708 // If the load has a builtin retain, insert a plain retain for it.
2709 if (Class == ARCInstKind::LoadWeakRetained) {
2710 Constant *Decl = EP.get(ARCRuntimeEntryPoints::EPT_Retain);
  CallInst *CI = CallInst::Create(Decl, EarlierCall, "", Call);
  CI->setTailCall();
}
2714 // Zap the fully redundant load.
  Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
  Call->eraseFromParent();
  goto clobbered;
case AliasAnalysis::MayAlias:
case AliasAnalysis::PartialAlias:
  goto clobbered;
case AliasAnalysis::NoAlias:
  break;
}
break;
}
2726 case ARCInstKind::MoveWeak:
2727 case ARCInstKind::CopyWeak:
// TODO: Grab the copied value.
continue;
2730 case ARCInstKind::AutoreleasepoolPush:
2731 case ARCInstKind::None:
2732 case ARCInstKind::IntrinsicUser:
2733 case ARCInstKind::User:
2734 // Weak pointers are only modified through the weak entry points
  // (and arbitrary calls, which could call the weak entry points).
  continue;
default:
  // Anything else could modify the weak pointer.
  goto clobbered;
}
}
clobbered:;
}
2745 // Then, for each destroyWeak with an alloca operand, check to see if
2746 // the alloca and all its users can be zapped.
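// For example (illustrative IR only), a stack weak slot whose only users
// are weak entry points:
//
//   %w = alloca i8*
//   %0 = call i8* @objc_initWeak(i8** %w, i8* %x)
//   call void @objc_destroyWeak(i8** %w)
//
// can be deleted together with all of its users.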
2747 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2748 Instruction *Inst = &*I++;
2749 ARCInstKind Class = GetBasicARCInstKind(Inst);
if (Class != ARCInstKind::DestroyWeak)
  continue;
2753 CallInst *Call = cast<CallInst>(Inst);
2754 Value *Arg = Call->getArgOperand(0);
2755 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2756 for (User *U : Alloca->users()) {
2757 const Instruction *UserInst = cast<Instruction>(U);
2758 switch (GetBasicARCInstKind(UserInst)) {
2759 case ARCInstKind::InitWeak:
2760 case ARCInstKind::StoreWeak:
  case ARCInstKind::DestroyWeak:
    continue;
  default:
    goto done;
  }
}
2768 for (auto UI = Alloca->user_begin(), UE = Alloca->user_end(); UI != UE;) {
2769 CallInst *UserInst = cast<CallInst>(*UI++);
2770 switch (GetBasicARCInstKind(UserInst)) {
2771 case ARCInstKind::InitWeak:
2772 case ARCInstKind::StoreWeak:
2773 // These functions return their second argument.
  UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
  break;
case ARCInstKind::DestroyWeak:
  // No return value.
  break;
default:
  llvm_unreachable("alloca really is used!");
}
    UserInst->eraseFromParent();
  }
  Alloca->eraseFromParent();
done:;
}
}
/// Identify program paths that execute sequences of retains and releases
/// that can be eliminated.
2792 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2793 // Releases, Retains - These are used to store the results of the main flow
2794 // analysis. These use Value* as the key instead of Instruction* so that the
2795 // map stays valid when we get around to rewriting code and calls get
2796 // replaced by arguments.
2797 DenseMap<Value *, RRInfo> Releases;
2798 MapVector<Value *, RRInfo> Retains;
2800 // This is used during the traversal of the function to track the
2801 // states for each identified object at each block.
2802 DenseMap<const BasicBlock *, BBState> BBStates;
2804 // Analyze the CFG of the function, and all instructions.
2805 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
// Transform.
bool AnyPairsCompletelyEliminated = PerformCodePlacement(BBStates, Retains,
                                                         Releases,
                                                         F.getParent());

// Cleanup.
2813 MultiOwnersSet.clear();
2815 return AnyPairsCompletelyEliminated && NestingDetected;
2818 /// Check if there is a dependent call earlier that does not have anything in
2819 /// between the Retain and the call that can affect the reference count of their
2820 /// shared pointer argument. Note that Retain need not be in BB.
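///
/// For example (illustrative IR only):
///
///   %call = call i8* @something(...)
///   %0 = call i8* @objc_retain(i8* %call)
///
/// qualifies when no instruction between the call and the retain can
/// affect the reference count of %call.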
static bool
HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
2823 SmallPtrSetImpl<Instruction *> &DepInsts,
2824 SmallPtrSetImpl<const BasicBlock *> &Visited,
2825 ProvenanceAnalysis &PA) {
2826 FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
2827 DepInsts, Visited, PA);
if (DepInsts.size() != 1)
  return false;

CallInst *Call =
2832 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2834 // Check that the pointer is the return value of the call.
if (!Call || Arg != Call)
  return false;
2838 // Check that the call is a regular call.
2839 ARCInstKind Class = GetBasicARCInstKind(Call);
if (Class != ARCInstKind::CallOrUser && Class != ARCInstKind::Call)
  return false;

return true;
}
2846 /// Find a dependent retain that precedes the given autorelease for which there
/// is nothing in between the two instructions that can affect the ref count
/// of Arg.
static CallInst *
FindPredecessorRetainWithSafePath(const Value *Arg, BasicBlock *BB,
2851 Instruction *Autorelease,
2852 SmallPtrSetImpl<Instruction *> &DepInsts,
2853 SmallPtrSetImpl<const BasicBlock *> &Visited,
2854 ProvenanceAnalysis &PA) {
2855 FindDependencies(CanChangeRetainCount, Arg,
2856 BB, Autorelease, DepInsts, Visited, PA);
if (DepInsts.size() != 1)
  return nullptr;

CallInst *Retain =
2861 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2863 // Check that we found a retain with the same argument.
2864 if (!Retain || !IsRetain(GetBasicARCInstKind(Retain)) ||
    GetArgRCIdentityRoot(Retain) != Arg) {
  return nullptr;
}

return Retain;
}
2872 /// Look for an ``autorelease'' instruction dependent on Arg such that there are
2873 /// no instructions dependent on Arg that need a positive ref count in between
2874 /// the autorelease and the ret.
static CallInst *
FindPredecessorAutoreleaseWithSafePath(const Value *Arg, BasicBlock *BB,
                                       ReturnInst *Ret,
                                       SmallPtrSetImpl<Instruction *> &DepInsts,
2879 SmallPtrSetImpl<const BasicBlock *> &V,
2880 ProvenanceAnalysis &PA) {
2881 FindDependencies(NeedsPositiveRetainCount, Arg,
2882 BB, Ret, DepInsts, V, PA);
if (DepInsts.size() != 1)
  return nullptr;
2886 CallInst *Autorelease =
    dyn_cast_or_null<CallInst>(*DepInsts.begin());
if (!Autorelease)
  return nullptr;
2890 ARCInstKind AutoreleaseClass = GetBasicARCInstKind(Autorelease);
if (!IsAutorelease(AutoreleaseClass))
  return nullptr;
if (GetArgRCIdentityRoot(Autorelease) != Arg)
  return nullptr;

return Autorelease;
}
2899 /// Look for this pattern:
2901 /// %call = call i8* @something(...)
2902 /// %2 = call i8* @objc_retain(i8* %call)
/// %3 = call i8* @objc_autorelease(i8* %2)
/// ret i8* %3
2906 /// And delete the retain and autorelease.
2907 void ObjCARCOpt::OptimizeReturns(Function &F) {
if (!F.getReturnType()->isPointerTy())
  return;
2911 DEBUG(dbgs() << "\n== ObjCARCOpt::OptimizeReturns ==\n");
2913 SmallPtrSet<Instruction *, 4> DependingInstructions;
2914 SmallPtrSet<const BasicBlock *, 4> Visited;
2915 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
2916 BasicBlock *BB = FI;
2917 ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
DEBUG(dbgs() << "Visiting: " << *Ret << "\n");

if (!Ret)
  continue;
2924 const Value *Arg = GetRCIdentityRoot(Ret->getOperand(0));
2926 // Look for an ``autorelease'' instruction that is a predecessor of Ret and
2927 // dependent on Arg such that there are no instructions dependent on Arg
2928 // that need a positive ref count in between the autorelease and Ret.
2929 CallInst *Autorelease =
2930 FindPredecessorAutoreleaseWithSafePath(Arg, BB, Ret,
                                       DependingInstructions, Visited,
                                       PA);
DependingInstructions.clear();
Visited.clear();

if (!Autorelease)
  continue;

CallInst *Retain =
    FindPredecessorRetainWithSafePath(Arg, BB, Autorelease,
2941 DependingInstructions, Visited, PA);
DependingInstructions.clear();
Visited.clear();

if (!Retain)
  continue;
2948 // Check that there is nothing that can affect the reference count
2949 // between the retain and the call. Note that Retain need not be in BB.
2950 bool HasSafePathToCall = HasSafePathToPredecessorCall(Arg, Retain,
                                                        DependingInstructions,
                                                        Visited, PA);
DependingInstructions.clear();
Visited.clear();

if (!HasSafePathToCall)
  continue;
// If so, we can zap the retain and autorelease.
Changed = true;
++NumRets;
2962 DEBUG(dbgs() << "Erasing: " << *Retain << "\nErasing: "
2963 << *Autorelease << "\n");
2964 EraseInstruction(Retain);
2965 EraseInstruction(Autorelease);
void
ObjCARCOpt::GatherStatistics(Function &F, bool AfterOptimization) {
2972 llvm::Statistic &NumRetains =
2973 AfterOptimization? NumRetainsAfterOpt : NumRetainsBeforeOpt;
2974 llvm::Statistic &NumReleases =
2975 AfterOptimization? NumReleasesAfterOpt : NumReleasesBeforeOpt;
2977 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2978 Instruction *Inst = &*I++;
    switch (GetBasicARCInstKind(Inst)) {
    default:
      break;
    case ARCInstKind::Retain:
      ++NumRetains;
      break;
    case ARCInstKind::Release:
      ++NumReleases;
      break;
    }
  }
}
2993 bool ObjCARCOpt::doInitialization(Module &M) {
if (!EnableARCOpts)
  return false;

// If nothing in the Module uses ARC, don't do anything.
Run = ModuleHasARC(M);
if (!Run)
  return false;
3002 // Identify the imprecise release metadata kind.
3003 ImpreciseReleaseMDKind =
3004 M.getContext().getMDKindID("clang.imprecise_release");
3005 CopyOnEscapeMDKind =
3006 M.getContext().getMDKindID("clang.arc.copy_on_escape");
3007 NoObjCARCExceptionsMDKind =
3008 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
3009 #ifdef ARC_ANNOTATIONS
3010 ARCAnnotationBottomUpMDKind =
3011 M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
3012 ARCAnnotationTopDownMDKind =
3013 M.getContext().getMDKindID("llvm.arc.annotation.topdown");
3014 ARCAnnotationProvenanceSourceMDKind =
3015 M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
3016 #endif // ARC_ANNOTATIONS
3018 // Intuitively, objc_retain and others are nocapture, however in practice
3019 // they are not, because they return their argument value. And objc_release
3020 // calls finalizers which can have arbitrary side effects.
3022 // Initialize our runtime entry point cache.
3028 bool ObjCARCOpt::runOnFunction(Function &F) {
if (!EnableARCOpts)
  return false;

// If nothing in the Module uses ARC, don't do anything.
if (!Run)
  return false;

Changed = false;
3038 DEBUG(dbgs() << "<<< ObjCARCOpt: Visiting Function: " << F.getName() << " >>>"
3041 PA.setAA(&getAnalysis<AliasAnalysis>());
3044 if (AreStatisticsEnabled()) {
  GatherStatistics(F, false);
}
3049 // This pass performs several distinct transformations. As a compile-time aid
3050 // when compiling code that isn't ObjC, skip these if the relevant ObjC
3051 // library functions aren't declared.
3053 // Preliminary optimizations. This also computes UsedInThisFunction.
3054 OptimizeIndividualCalls(F);
3056 // Optimizations for weak pointers.
3057 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::LoadWeak)) |
3058 (1 << unsigned(ARCInstKind::LoadWeakRetained)) |
3059 (1 << unsigned(ARCInstKind::StoreWeak)) |
3060 (1 << unsigned(ARCInstKind::InitWeak)) |
3061 (1 << unsigned(ARCInstKind::CopyWeak)) |
3062 (1 << unsigned(ARCInstKind::MoveWeak)) |
3063 (1 << unsigned(ARCInstKind::DestroyWeak))))
3064 OptimizeWeakCalls(F);
3066 // Optimizations for retain+release pairs.
3067 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Retain)) |
3068 (1 << unsigned(ARCInstKind::RetainRV)) |
3069 (1 << unsigned(ARCInstKind::RetainBlock))))
3070 if (UsedInThisFunction & (1 << unsigned(ARCInstKind::Release)))
3071 // Run OptimizeSequences until it either stops making changes or
3072 // no retain+release pair nesting is detected.
3073 while (OptimizeSequences(F)) {}
3075 // Optimizations if objc_autorelease is used.
3076 if (UsedInThisFunction & ((1 << unsigned(ARCInstKind::Autorelease)) |
                          (1 << unsigned(ARCInstKind::AutoreleaseRV))))
  OptimizeReturns(F);
3080 // Gather statistics after optimization.
3082 if (AreStatisticsEnabled()) {
  GatherStatistics(F, true);
}
DEBUG(dbgs() << "\n");

return Changed;
}
3092 void ObjCARCOpt::releaseMemory() {