//===- ObjCARCOpts.cpp - ObjC ARC Optimization ----------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file defines ObjC ARC optimizations. ARC stands for Automatic
/// Reference Counting and is a system for managing reference counts for objects
/// in Objective C.
///
/// The optimizations performed include elimination of redundant, partially
/// redundant, and inconsequential reference count operations, elimination of
/// redundant weak pointer operations, and numerous minor simplifications.
///
/// WARNING: This file knows about certain library functions. It recognizes them
/// by name, and hardwires knowledge of their semantics.
///
/// WARNING: This file knows about how certain Objective-C library functions are
/// used. Naive LLVM IR transformations which would otherwise be
/// behavior-preserving may break these assumptions.
///
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "objc-arc-opts"
#include "ObjCARC.h"
#include "DependencyAnalysis.h"
#include "ObjCARCAliasAnalysis.h"
#include "ProvenanceAnalysis.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CFG.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;
using namespace llvm::objcarc;

/// \defgroup MiscUtils Miscellaneous utilities that are not ARC specific.
/// @{

namespace {
  /// \brief An associative container with fast insertion-order (deterministic)
  /// iteration over its elements. Plus the special blot operation.
  template<class KeyT, class ValueT>
  class MapVector {
    /// Map keys to indices in Vector.
    typedef DenseMap<KeyT, size_t> MapTy;
    MapTy Map;

    /// Keys and values.
    typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
    VectorTy Vector;

  public:
    typedef typename VectorTy::iterator iterator;
    typedef typename VectorTy::const_iterator const_iterator;
    iterator begin() { return Vector.begin(); }
    iterator end() { return Vector.end(); }
    const_iterator begin() const { return Vector.begin(); }
    const_iterator end() const { return Vector.end(); }

#ifdef XDEBUG
    ~MapVector() {
      assert(Vector.size() >= Map.size()); // May differ due to blotting.
      for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
           I != E; ++I) {
        assert(I->second < Vector.size());
        assert(Vector[I->second].first == I->first);
      }
      for (typename VectorTy::const_iterator I = Vector.begin(),
           E = Vector.end(); I != E; ++I)
        assert(!I->first ||
               (Map.count(I->first) &&
                Map[I->first] == size_t(I - Vector.begin())));
    }
#endif

    ValueT &operator[](const KeyT &Arg) {
      std::pair<typename MapTy::iterator, bool> Pair =
        Map.insert(std::make_pair(Arg, size_t(0)));
      if (Pair.second) {
        size_t Num = Vector.size();
        Pair.first->second = Num;
        Vector.push_back(std::make_pair(Arg, ValueT()));
        return Vector[Num].second;
      }
      return Vector[Pair.first->second].second;
    }

    std::pair<iterator, bool>
    insert(const std::pair<KeyT, ValueT> &InsertPair) {
      std::pair<typename MapTy::iterator, bool> Pair =
        Map.insert(std::make_pair(InsertPair.first, size_t(0)));
      if (Pair.second) {
        size_t Num = Vector.size();
        Pair.first->second = Num;
        Vector.push_back(InsertPair);
        return std::make_pair(Vector.begin() + Num, true);
      }
      return std::make_pair(Vector.begin() + Pair.first->second, false);
    }

    const_iterator find(const KeyT &Key) const {
      typename MapTy::const_iterator It = Map.find(Key);
      if (It == Map.end()) return Vector.end();
      return Vector.begin() + It->second;
    }

    /// This is similar to erase, but instead of removing the element from the
    /// vector, it just zeros out the key in the vector. This leaves iterators
    /// intact, but clients must be prepared for zeroed-out keys when iterating.
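    ///
    /// For example (an illustrative sketch of the intended usage, not code
    /// from this file): after calling blot(K), iteration still visits the
    /// slot that held K, but its key compares equal to KeyT(), so iteration
    /// loops typically guard with a check such as "if (!I->first) continue;".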
    void blot(const KeyT &Key) {
      typename MapTy::iterator It = Map.find(Key);
      if (It == Map.end()) return;
      Vector[It->second].first = KeyT();
      Map.erase(It);
    }

    void clear() {
      Map.clear();
      Vector.clear();
    }
  };
}

/// @}

/// \defgroup ARCUtilities Utility declarations/definitions specific to ARC.
/// @{

/// \brief This is similar to StripPointerCastsAndObjCCalls but it stops as soon
/// as it finds a value with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicInstructionClass(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return 0;
    return Arg;
  }

  // If we found an identifiable object, and it has multiple uses, but they
  // are all trivial uses, we can still consider this to be a single-use
  // value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
         UI != UE; ++UI) {
      const User *U = *UI;
      if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
        return 0;
    }

    return Arg;
  }

  return 0;
}

/// \brief Test whether the given retainable object pointer escapes.
///
/// This differs from regular escape analysis in that a use as an
/// argument to a call is not considered an escape.
///
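/// For example (an illustrative sketch, not IR from an actual test case):
///
///   %x = call i8* @returnsRetained()
///   call void @foo(i8* %x)           ; use as a call argument: not an escape
///   store i8* %x, i8** @some_global  ; visible to arbitrary code: escapes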
static bool DoesRetainableObjPtrEscape(const User *Ptr) {
  DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Target: " << *Ptr << "\n");

  // Walk the def-use chains.
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(Ptr);
  // If Ptr has any operands add them as well.
  for (User::const_op_iterator I = Ptr->op_begin(), E = Ptr->op_end(); I != E;
       ++I)
    Worklist.push_back(*I);

  // Ensure we do not visit any value twice.
  SmallPtrSet<const Value *, 8> VisitedSet;

  do {
    const Value *V = Worklist.pop_back_val();

    DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Visiting: " << *V << "\n");

    for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      const User *UUser = *UI;

      DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User: " << *UUser << "\n");

      // Special - Use by a call (callee or argument) is not considered
      // an escape.
      switch (GetBasicInstructionClass(UUser)) {
      case IC_StoreWeak:
      case IC_InitWeak:
      case IC_StoreStrong:
      case IC_Autorelease:
      case IC_AutoreleaseRV: {
        DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User copies pointer "
              "arguments. Pointer Escapes!\n");
        // These special functions make copies of their pointer arguments.
        return true;
      }
      case IC_IntrinsicUser:
        // Use by the use intrinsic is not an escape.
        continue;
      case IC_User:
      case IC_None:
        // Use by an instruction which copies the value is an escape if the
        // result is an escape.
        if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
            isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {

          if (VisitedSet.insert(UUser)) {
            DEBUG(dbgs() << "DoesRetainableObjPtrEscape: User copies value. "
                  "Ptr escapes if result escapes. Adding to list.\n");
            Worklist.push_back(UUser);
          } else {
            DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Already visited node."
                  "\n");
          }
          continue;
        }
        // Use by a load is not an escape.
        if (isa<LoadInst>(UUser))
          continue;
        // Use by a store is not an escape if the use is the address.
        if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
          if (V != SI->getValueOperand())
            continue;
        break;
      default:
        // Regular calls and other stuff are not considered escapes.
        continue;
      }
      // Otherwise, conservatively assume an escape.
      DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Assuming ptr escapes.\n");
      return true;
    }
  } while (!Worklist.empty());

  // We did not find any escapes.
  DEBUG(dbgs() << "DoesRetainableObjPtrEscape: Ptr does not escape.\n");
  return false;
}

/// @}

/// \defgroup ARCOpt ARC Optimization.
/// @{

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.

// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.

// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.

// TODO: OptimizeSequences could be generalized to be interprocedural.

// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.

// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.

// TODO: The pointer returned from objc_loadWeakRetained is retained.

// TODO: Delete release+retain pairs (rare).

STATISTIC(NumNoops,       "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases,"Number of autoreleases converted to releases");
STATISTIC(NumRets,        "Number of return value forwarding "
                          "retain+autoreleases eliminated");
STATISTIC(NumRRs,         "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,       "Number of calls peephole-optimized");

namespace {
  /// \brief A sequence of states that a pointer may go through in which an
  /// objc_retain and objc_release are actually needed.
  enum Sequence {
    S_None,
    S_Retain,         ///< objc_retain(x).
    S_CanRelease,     ///< foo(x) -- x could possibly see a ref count decrement.
    S_Use,            ///< any use of x.
    S_Stop,           ///< like S_Release, but code motion is stopped.
    S_Release,        ///< objc_release(x).
    S_MovableRelease  ///< objc_release(x), !clang.imprecise_release.
  };
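
  // As a rough summary (illustrative, not exhaustive): bottom-up, a pointer
  // typically walks S_Release/S_MovableRelease -> S_Use -> S_CanRelease,
  // with an objc_retain completing the sequence; top-down it walks
  // S_Retain -> S_CanRelease -> S_Use, with an objc_release completing it.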

  raw_ostream &operator<<(raw_ostream &OS, const Sequence S)
    LLVM_ATTRIBUTE_UNUSED;
  raw_ostream &operator<<(raw_ostream &OS, const Sequence S) {
    switch (S) {
    case S_None:
      return OS << "S_None";
    case S_Retain:
      return OS << "S_Retain";
    case S_CanRelease:
      return OS << "S_CanRelease";
    case S_Use:
      return OS << "S_Use";
    case S_Release:
      return OS << "S_Release";
    case S_MovableRelease:
      return OS << "S_MovableRelease";
    case S_Stop:
      return OS << "S_Stop";
    }
    llvm_unreachable("Unknown sequence type.");
  }
}

static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
  // The easy cases.
  if (A == B)
    return A;
  if (A == S_None || B == S_None)
    return S_None;

  if (A > B) std::swap(A, B);
  if (TopDown) {
    // Choose the side which is further along in the sequence.
    if ((A == S_Retain || A == S_CanRelease) &&
        (B == S_CanRelease || B == S_Use))
      return B;
  } else {
    // Choose the side which is further along in the sequence.
    if ((A == S_Use || A == S_CanRelease) &&
        (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
      return A;
    // If both sides are releases, choose the more conservative one.
    if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
      return A;
    if (A == S_Release && B == S_MovableRelease)
      return A;
  }

  return S_None;
}
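
// For example (a worked case using the enum order above): merging S_Retain
// with S_Use top-down leaves A = S_Retain and B = S_Use, matches the first
// test, and returns S_Use, the state further along in the sequence. Merging
// S_Retain with S_Stop top-down matches nothing and conservatively yields
// S_None.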

namespace {
  /// \brief Unidirectional information about either a
  /// retain-decrement-use-release sequence or release-use-decrement-retain
  /// reverse sequence.
  struct RRInfo {
    /// After an objc_retain, the reference count of the referenced
    /// object is known to be positive. Similarly, before an objc_release, the
    /// reference count of the referenced object is known to be positive. If
    /// there are retain-release pairs in code regions where the retain count
    /// is known to be positive, they can be eliminated, regardless of any side
    /// effects between them.
    ///
    /// Also, a retain+release pair nested within another retain+release
    /// pair all on the known same pointer value can be eliminated, regardless
    /// of any intervening side effects.
    ///
    /// KnownSafe is true when either of these conditions is satisfied.
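    ///
    /// For example (an illustrative sketch, not IR from an actual test
    /// case), the inner pair below is nested within the outer pair on the
    /// same pointer and is known safe regardless of what @opaque does:
    ///
    ///   objc_retain(%x)   ; outer
    ///   objc_retain(%x)   ;   inner
    ///   call void @opaque()
    ///   objc_release(%x)  ;   inner
    ///   objc_release(%x)  ; outer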
    bool KnownSafe;

    /// True if the objc_release calls are all marked with the "tail" keyword.
    bool IsTailCallRelease;

    /// If the Calls are objc_release calls and they all have a
    /// clang.imprecise_release tag, this is the metadata tag.
    MDNode *ReleaseMetadata;

    /// For a top-down sequence, the set of objc_retains or
    /// objc_retainBlocks. For bottom-up, the set of objc_releases.
    SmallPtrSet<Instruction *, 2> Calls;

    /// The set of optimal insert positions for moving calls in the opposite
    /// sequence.
    SmallPtrSet<Instruction *, 2> ReverseInsertPts;

    RRInfo() :
      KnownSafe(false), IsTailCallRelease(false), ReleaseMetadata(0) {}

    void clear();
  };
}

void RRInfo::clear() {
  KnownSafe = false;
  IsTailCallRelease = false;
  ReleaseMetadata = 0;
  Calls.clear();
  ReverseInsertPts.clear();
}

namespace {
  /// \brief This class summarizes several per-pointer runtime properties which
  /// are propagated through the flow graph.
  class PtrState {
    /// True if the reference count is known to be incremented.
    bool KnownPositiveRefCount;

    /// True if we've seen an opportunity for partial RR elimination, such as
    /// pushing calls into a CFG triangle or into one side of a CFG diamond.
    bool Partial;

    /// The current position in the sequence.
    Sequence Seq : 8;

  public:
    /// Unidirectional information about the current sequence.
    ///
    /// TODO: Encapsulate this better.
    RRInfo RRI;

    PtrState() : KnownPositiveRefCount(false), Partial(false),
                 Seq(S_None) {}

    void SetKnownPositiveRefCount() {
      KnownPositiveRefCount = true;
    }

    void ClearKnownPositiveRefCount() {
      KnownPositiveRefCount = false;
    }

    bool HasKnownPositiveRefCount() const {
      return KnownPositiveRefCount;
    }

    void SetSeq(Sequence NewSeq) {
      Seq = NewSeq;
    }

    Sequence GetSeq() const {
      return Seq;
    }

    void ClearSequenceProgress() {
      ResetSequenceProgress(S_None);
    }

    void ResetSequenceProgress(Sequence NewSeq) {
      Seq = NewSeq;
      Partial = false;
      RRI.clear();
    }

    void Merge(const PtrState &Other, bool TopDown);
  };
}

void
PtrState::Merge(const PtrState &Other, bool TopDown) {
  Seq = MergeSeqs(Seq, Other.Seq, TopDown);
  KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount;

  // If we're not in a sequence (anymore), drop all associated state.
  if (Seq == S_None) {
    Partial = false;
    RRI.clear();
  } else if (Partial || Other.Partial) {
    // If we're doing a merge on a path that's previously seen a partial
    // merge, conservatively drop the sequence, to avoid doing partial
    // RR elimination. If the branch predicates for the two merges differ,
    // mixing them is unsafe.
    ClearSequenceProgress();
  } else {
    // Conservatively merge the ReleaseMetadata information.
    if (RRI.ReleaseMetadata != Other.RRI.ReleaseMetadata)
      RRI.ReleaseMetadata = 0;

    RRI.KnownSafe = RRI.KnownSafe && Other.RRI.KnownSafe;
    RRI.IsTailCallRelease = RRI.IsTailCallRelease &&
                            Other.RRI.IsTailCallRelease;
    RRI.Calls.insert(Other.RRI.Calls.begin(), Other.RRI.Calls.end());

    // Merge the insert point sets. If there are any differences,
    // that makes this a partial merge.
    Partial = RRI.ReverseInsertPts.size() != Other.RRI.ReverseInsertPts.size();
    for (SmallPtrSet<Instruction *, 2>::const_iterator
         I = Other.RRI.ReverseInsertPts.begin(),
         E = Other.RRI.ReverseInsertPts.end(); I != E; ++I)
      Partial |= RRI.ReverseInsertPts.insert(*I);
  }
}

namespace {
  /// \brief Per-BasicBlock state.
  class BBState {
    /// The number of unique control paths from the entry which can reach this
    /// block.
    unsigned TopDownPathCount;

    /// The number of unique control paths to exits from this block.
    unsigned BottomUpPathCount;

    /// A type for PerPtrTopDown and PerPtrBottomUp.
    typedef MapVector<const Value *, PtrState> MapTy;

    /// The top-down traversal uses this to record information known about a
    /// pointer at the bottom of each block.
    MapTy PerPtrTopDown;

    /// The bottom-up traversal uses this to record information known about a
    /// pointer at the top of each block.
    MapTy PerPtrBottomUp;

    /// Effective predecessors of the current block ignoring ignorable edges
    /// and ignored backedges.
    SmallVector<BasicBlock *, 2> Preds;
    /// Effective successors of the current block ignoring ignorable edges
    /// and ignored backedges.
    SmallVector<BasicBlock *, 2> Succs;

  public:
    BBState() : TopDownPathCount(0), BottomUpPathCount(0) {}

    typedef MapTy::iterator ptr_iterator;
    typedef MapTy::const_iterator ptr_const_iterator;

    ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    ptr_const_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    ptr_const_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }

    ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
    ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    ptr_const_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    ptr_const_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }

    /// Mark this block as being an entry block, which has one path from the
    /// entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// Mark this block as being an exit block, which has one path to an exit
    /// by definition.
    void SetAsExit() { BottomUpPathCount = 1; }

    PtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    PtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// Return the number of possible unique paths from an entry to an exit
    /// which pass through this block. This is only valid after both the
    /// top-down and bottom-up traversals are complete.
    unsigned GetAllPathCount() const {
      assert(TopDownPathCount != 0);
      assert(BottomUpPathCount != 0);
      return TopDownPathCount * BottomUpPathCount;
    }
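
    // For example (an illustrative sketch): in a simple if/else diamond,
    // each of the two middle blocks has TopDownPathCount == 1 and
    // BottomUpPathCount == 1, while the join block has TopDownPathCount == 2,
    // so GetAllPathCount() returns 2 for the join block and 1 for the
    // middle blocks.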

    // Specialized CFG utilities.
    typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
    edge_iterator pred_begin() { return Preds.begin(); }
    edge_iterator pred_end() { return Preds.end(); }
    edge_iterator succ_begin() { return Succs.begin(); }
    edge_iterator succ_end() { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };
}

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// The top-down traversal uses this to merge information about predecessors to
/// form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}

/// The bottom-up traversal uses this to merge information about successors to
/// form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}

// Only enable ARC Annotations if we are building a debug version of
// libObjCARCOpts.
#ifndef NDEBUG
#define ARC_ANNOTATIONS
#endif

// Define some macros along the lines of DEBUG and some helper functions to
// make it cleaner to create annotations in the source code and to no-op when
// not building in debug mode.
#ifdef ARC_ANNOTATIONS

#include "llvm/Support/CommandLine.h"

/// Enable/disable ARC sequence annotations.
static cl::opt<bool>
EnableARCAnnotations("enable-objc-arc-annotations", cl::init(false));

/// This function appends a unique ARCAnnotationProvenanceSourceMDKind id to an
/// instruction so that we can track backwards when post processing via the
/// llvm arc annotation processor tool. If the value is an instruction, the
/// hash MDString is attached to it as metadata; if it is an argument, the
/// hash is simply returned.
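///
/// For example (a hypothetical case), for an instruction %y defined in a
/// function @bar, the generated hash MDString would be "(bar,%y)".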
static MDString *AppendMDNodeToSourcePtr(unsigned NodeId,
                                         Value *Ptr) {
  MDString *Hash = 0;

  // If the pointer is a result of an instruction and it does not have a source
  // MDNode attached, attach a new MDNode to it. If the pointer is a result of
  // an instruction and does have a source MDNode attached to it, return a
  // reference to said Node. Otherwise just return 0.
  if (Instruction *Inst = dyn_cast<Instruction>(Ptr)) {
    MDNode *Node;
    if (!(Node = Inst->getMetadata(NodeId))) {
      // We do not have any node. Generate and attach the hash MDString to the
      // instruction.

      // We just use an MDString to ensure that this metadata gets written out
      // of line at the module level and to provide a very simple format
      // encoding the information herein. Both of these make it simpler to
      // parse the annotations with a simple external program.
      std::string Str;
      raw_string_ostream os(Str);
      os << "(" << Inst->getParent()->getParent()->getName() << ",%"
         << Inst->getName() << ")";

      Hash = MDString::get(Inst->getContext(), os.str());
      Inst->setMetadata(NodeId, MDNode::get(Inst->getContext(), Hash));
    } else {
      // We have a node. Grab its hash and return it.
      assert(Node->getNumOperands() == 1 &&
             "An ARCAnnotationProvenanceSourceMDKind can only have 1 operand.");
      Hash = cast<MDString>(Node->getOperand(0));
    }
  } else if (Argument *Arg = dyn_cast<Argument>(Ptr)) {
    std::string str;
    raw_string_ostream os(str);
    os << "(" << Arg->getParent()->getName() << ",%" << Arg->getName()
       << ")";
    Hash = MDString::get(Arg->getContext(), os.str());
  }

  return Hash;
}

static std::string SequenceToString(Sequence A) {
  std::string str;
  raw_string_ostream os(str);
  os << A;
  return os.str();
}

/// Helper function to change a Sequence into a String object using our
/// overload for raw_ostream so we only have printing code in one location.
static MDString *SequenceToMDString(LLVMContext &Context,
                                    Sequence A) {
  return MDString::get(Context, SequenceToString(A));
}

/// A simple function to generate a MDNode which describes the change in state
/// for Value *Ptr caused by Instruction *Inst.
static void AppendMDNodeToInstForPtr(unsigned NodeId,
                                     Instruction *Inst,
                                     Value *Ptr,
                                     MDString *PtrSourceMDNodeID,
                                     Sequence OldSeq,
                                     Sequence NewSeq) {
  MDNode *Node = 0;
  Value *tmp[3] = {PtrSourceMDNodeID,
                   SequenceToMDString(Inst->getContext(),
                                      OldSeq),
                   SequenceToMDString(Inst->getContext(),
                                      NewSeq)};
  Node = MDNode::get(Inst->getContext(),
                     ArrayRef<Value*>(tmp, 3));

  Inst->setMetadata(NodeId, Node);
}

/// Add to the beginning of the basic block llvm.ptr.annotations which show the
/// state of a pointer at the entrance to a basic block.
static void GenerateARCBBEntranceAnnotation(const char *Name, BasicBlock *BB,
                                            Value *Ptr, Sequence Seq) {
  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, BB->getFirstInsertionPt());

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}

/// Add to the end of the basic block llvm.ptr.annotations which show the state
/// of the pointer at the bottom of the basic block.
static void GenerateARCBBTerminatorAnnotation(const char *Name, BasicBlock *BB,
                                              Value *Ptr, Sequence Seq) {
  Module *M = BB->getParent()->getParent();
  LLVMContext &C = M->getContext();
  Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
  Type *I8XX = PointerType::getUnqual(I8X);
  Type *Params[] = {I8XX, I8XX};
  FunctionType *FTy = FunctionType::get(Type::getVoidTy(C),
                                        ArrayRef<Type*>(Params, 2),
                                        /*isVarArg=*/false);
  Constant *Callee = M->getOrInsertFunction(Name, FTy);

  IRBuilder<> Builder(BB, llvm::prior(BB->end()));

  Value *PtrName;
  StringRef Tmp = Ptr->getName();
  if (0 == (PtrName = M->getGlobalVariable(Tmp, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(Tmp,
                                                         Tmp + "_STR");
    PtrName = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                                 cast<Constant>(ActualPtrName), Tmp);
  }

  Value *S;
  std::string SeqStr = SequenceToString(Seq);
  if (0 == (S = M->getGlobalVariable(SeqStr, true))) {
    Value *ActualPtrName = Builder.CreateGlobalStringPtr(SeqStr,
                                                         SeqStr + "_STR");
    S = new GlobalVariable(*M, I8X, true, GlobalVariable::InternalLinkage,
                           cast<Constant>(ActualPtrName), SeqStr);
  }

  Builder.CreateCall2(Callee, PtrName, S);
}

/// Adds a source annotation to pointer and a state change annotation to Inst
/// referencing the source annotation and the old/new state of pointer.
static void GenerateARCAnnotation(unsigned InstMDId,
                                  unsigned PtrMDId,
                                  Instruction *Inst,
                                  Value *Ptr,
                                  Sequence OldSeq,
                                  Sequence NewSeq) {
  if (EnableARCAnnotations) {
    // First generate the source annotation on our pointer. This will return an
    // MDString* if Ptr actually comes from an instruction implying we can put
    // in a source annotation. If AppendMDNodeToSourcePtr returns 0 (i.e. NULL),
    // then we know that our pointer is from an Argument so we put a reference
    // to the argument number.
    //
    // The point of this is to make it easy for the
    // llvm-arc-annotation-processor tool to cross reference where the source
    // pointer is in the LLVM IR, since the LLVM IR parser does not provide
    // such information via debug info (there is normally no need for it
    // outside of non-standard cases like this one).
    MDString *SourcePtrMDNode =
      AppendMDNodeToSourcePtr(PtrMDId, Ptr);
    AppendMDNodeToInstForPtr(InstMDId, Inst, Ptr, SourcePtrMDNode, OldSeq,
                             NewSeq);
  }
}

// The actual interface for accessing the above functionality is defined via
// some simple macros which are defined below. We do this so that the user does
// not need to pass in what metadata id is needed, resulting in cleaner code.
// It also provides an easy way to conditionally no-op all annotation support
// in a non-debug build.

/// Use this macro to annotate a sequence state change when processing
/// instructions bottom up.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)                          \
  GenerateARCAnnotation(ARCAnnotationBottomUpMDKind,                    \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))
/// Use this macro to annotate a sequence state change when processing
/// instructions top down.
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)                           \
  GenerateARCAnnotation(ARCAnnotationTopDownMDKind,                     \
                        ARCAnnotationProvenanceSourceMDKind, (inst),    \
                        const_cast<Value*>(ptr), (old), (new))

#define ANNOTATE_BB(_states, _bb, _name, _type, _direction)                  \
  do {                                                                       \
    if (EnableARCAnnotations) {                                              \
      for(BBState::ptr_const_iterator I = (_states)._direction##_ptr_begin(),\
          E = (_states)._direction##_ptr_end(); I != E; ++I) {               \
        Value *Ptr = const_cast<Value*>(I->first);                           \
        Sequence Seq = I->second.GetSeq();                                   \
        GenerateARCBB ## _type ## Annotation(_name, (_bb), Ptr, Seq);        \
      }                                                                      \
    }                                                                        \
  } while (0)

#define ANNOTATE_BOTTOMUP_BBSTART(_states, _basicblock)                      \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbstart",  \
              Entrance, bottom_up)
#define ANNOTATE_BOTTOMUP_BBEND(_states, _basicblock)                        \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.bottomup.bbend",    \
              Terminator, bottom_up)
#define ANNOTATE_TOPDOWN_BBSTART(_states, _basicblock)                       \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbstart",   \
              Entrance, top_down)
#define ANNOTATE_TOPDOWN_BBEND(_states, _basicblock)                         \
  ANNOTATE_BB(_states, _basicblock, "llvm.arc.annotation.topdown.bbend",     \
              Terminator, top_down)

#else // !ARC_ANNOTATIONS

// If annotations are off, noop.
#define ANNOTATE_BOTTOMUP(inst, ptr, old, new)
#define ANNOTATE_TOPDOWN(inst, ptr, old, new)
#define ANNOTATE_BOTTOMUP_BBSTART(states, basicblock)
#define ANNOTATE_BOTTOMUP_BBEND(states, basicblock)
#define ANNOTATE_TOPDOWN_BBSTART(states, basicblock)
#define ANNOTATE_TOPDOWN_BBEND(states, basicblock)
#endif // !ARC_ANNOTATIONS

namespace {
  /// \brief The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;

    /// A flag indicating whether this optimization pass should run.
    bool Run;

    /// Declarations for ObjC runtime functions, for use in creating calls to
    /// them. These are initialized lazily to avoid cluttering up the Module
    /// with unused declarations.

    /// Declaration for ObjC runtime function
    /// objc_retainAutoreleasedReturnValue.
    Constant *RetainRVCallee;
    /// Declaration for ObjC runtime function objc_autoreleaseReturnValue.
    Constant *AutoreleaseRVCallee;
    /// Declaration for ObjC runtime function objc_release.
    Constant *ReleaseCallee;
    /// Declaration for ObjC runtime function objc_retain.
    Constant *RetainCallee;
    /// Declaration for ObjC runtime function objc_retainBlock.
    Constant *RetainBlockCallee;
    /// Declaration for ObjC runtime function objc_autorelease.
    Constant *AutoreleaseCallee;

    /// Flags which determine whether each of the interesting runtime functions
    /// is in fact used in the current function.
    unsigned UsedInThisFunction;

    /// The Metadata Kind for clang.imprecise_release metadata.
    unsigned ImpreciseReleaseMDKind;

    /// The Metadata Kind for clang.arc.copy_on_escape metadata.
    unsigned CopyOnEscapeMDKind;

    /// The Metadata Kind for clang.arc.no_objc_arc_exceptions metadata.
    unsigned NoObjCARCExceptionsMDKind;

#ifdef ARC_ANNOTATIONS
    /// The Metadata Kind for llvm.arc.annotation.bottomup metadata.
    unsigned ARCAnnotationBottomUpMDKind;
    /// The Metadata Kind for llvm.arc.annotation.topdown metadata.
    unsigned ARCAnnotationTopDownMDKind;
    /// The Metadata Kind for llvm.arc.annotation.provenancesource metadata.
    unsigned ARCAnnotationProvenanceSourceMDKind;
#endif // ARC_ANNOTATIONS

    Constant *getRetainRVCallee(Module *M);
    Constant *getAutoreleaseRVCallee(Module *M);
    Constant *getReleaseCallee(Module *M);
    Constant *getRetainCallee(Module *M);
    Constant *getRetainBlockCallee(Module *M);
    Constant *getAutoreleaseCallee(Module *M);

    bool IsRetainBlockOptimizable(const Instruction *Inst);

    void OptimizeRetainCall(Function &F, Instruction *Retain);
    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                   InstructionClass &Class);
    bool OptimizeRetainBlockCall(Function &F, Instruction *RetainBlock,
                                 InstructionClass &Class);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst,
                                  BasicBlock *BB,
                                  MapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       MapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F,
               DenseMap<const BasicBlock *, BBState> &BBStates,
               MapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   MapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts,
                   Module *M);

    bool ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState> &BBStates,
                               MapVector<Value *, RRInfo> &Retains,
                               DenseMap<Value *, RRInfo> &Releases,
                               Module *M,
                               SmallVector<Instruction *, 4> &NewRetains,
                               SmallVector<Instruction *, 4> &NewReleases,
                               SmallVector<Instruction *, 8> &DeadInsts,
                               RRInfo &RetainsToMove,
                               RRInfo &ReleasesToMove,
                               Value *Arg,
                               bool KnownSafe,
                               bool &AnyPairsCompletelyEliminated);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              MapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases,
                              Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual bool doInitialization(Module &M);
    virtual bool runOnFunction(Function &F);
    virtual void releaseMemory();

  public:
    static char ID;
    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };
}

char ObjCARCOpt::ID = 0;
INITIALIZE_PASS_BEGIN(ObjCARCOpt,
                      "objc-arc", "ObjC ARC optimization", false, false)
INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
INITIALIZE_PASS_END(ObjCARCOpt,
                    "objc-arc", "ObjC ARC optimization", false, false)

Pass *llvm::createObjCARCOptPass() {
  return new ObjCARCOpt();
}

void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<ObjCARCAliasAnalysis>();
  AU.addRequired<AliasAnalysis>();
  // ARC optimization doesn't currently split critical edges.
  AU.setPreservesCFG();
}

bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
  // Without the magic metadata tag, we have to assume this might be an
  // objc_retainBlock call inserted to convert a block pointer to an id,
  // in which case it really is needed.
  if (!Inst->getMetadata(CopyOnEscapeMDKind))
    return false;

  // If the pointer "escapes" (not including being used in a call),
  // the copy may be needed.
  if (DoesRetainableObjPtrEscape(Inst))
    return false;

  // Otherwise, it's not needed.
  return true;
}

Constant *ObjCARCOpt::getRetainRVCallee(Module *M) {
  if (!RetainRVCallee) {
    LLVMContext &C = M->getContext();
    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
    Type *Params[] = { I8X };
    FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    RetainRVCallee =
      M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy,
                             Attribute);
  }
  return RetainRVCallee;
}

Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
  if (!AutoreleaseRVCallee) {
    LLVMContext &C = M->getContext();
    Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
    Type *Params[] = { I8X };
    FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseRVCallee =
      M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
                             Attribute);
  }
  return AutoreleaseRVCallee;
}

Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
  if (!ReleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    ReleaseCallee =
      M->getOrInsertFunction(
        "objc_release",
        FunctionType::get(Type::getVoidTy(C), Params, /*isVarArg=*/false),
        Attribute);
  }
  return ReleaseCallee;
}

Constant *ObjCARCOpt::getRetainCallee(Module *M) {
  if (!RetainCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    RetainCallee =
      M->getOrInsertFunction(
        "objc_retain",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return RetainCallee;
}

Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
  if (!RetainBlockCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    // objc_retainBlock is not nounwind because it calls user copy constructors
    // which could theoretically throw.
    RetainBlockCallee =
      M->getOrInsertFunction(
        "objc_retainBlock",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        AttributeSet());
  }
  return RetainBlockCallee;
}

Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
  if (!AutoreleaseCallee) {
    LLVMContext &C = M->getContext();
    Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
    AttributeSet Attribute =
      AttributeSet().addAttribute(M->getContext(), AttributeSet::FunctionIndex,
                                  Attribute::NoUnwind);
    AutoreleaseCallee =
      M->getOrInsertFunction(
        "objc_autorelease",
        FunctionType::get(Params[0], Params, /*isVarArg=*/false),
        Attribute);
  }
  return AutoreleaseCallee;
}

/// Turn objc_retain into objc_retainAutoreleasedReturnValue if the operand is
/// a return value.
void
ObjCARCOpt::OptimizeRetainCall(Function &F, Instruction *Retain) {
  ImmutableCallSite CS(GetObjCArg(Retain));
  const Instruction *Call = CS.getInstruction();
  if (!Call) return;
  if (Call->getParent() != Retain->getParent()) return;

  // Check that the call is next to the retain.
  BasicBlock::const_iterator I = Call;
  ++I;
  while (IsNoopInstruction(I)) ++I;
  if (&*I != Retain)
    return;

  // Turn it into an objc_retainAutoreleasedReturnValue.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainCall: Transforming "
                  "objc_retain => objc_retainAutoreleasedReturnValue"
                  " since the operand is a return value.\n"
                  "                                Old: "
               << *Retain << "\n");

  cast<CallInst>(Retain)->setCalledFunction(getRetainRVCallee(F.getParent()));

  DEBUG(dbgs() << "                                New: "
               << *Retain << "\n");
}
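
// OptimizeRetainCall example (an illustrative sketch, not IR from an actual
// test case):
//
//   %x = call i8* @something()
//   %y = call i8* @objc_retain(i8* %x)
//
// becomes:
//
//   %x = call i8* @something()
//   %y = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)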

/// Turn objc_retainAutoreleasedReturnValue into objc_retain if the operand is
/// not a return value. Or, if it can be paired with an
/// objc_autoreleaseReturnValue, delete the pair and return true.
bool
ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
  // Check for the argument being from an immediately preceding call or invoke.
  const Value *Arg = GetObjCArg(RetainRV);
  ImmutableCallSite CS(Arg);
  if (const Instruction *Call = CS.getInstruction()) {
    if (Call->getParent() == RetainRV->getParent()) {
      BasicBlock::const_iterator I = Call;
      ++I;
      while (IsNoopInstruction(I)) ++I;
      if (&*I == RetainRV)
        return false;
    } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
      BasicBlock *RetainRVParent = RetainRV->getParent();
      if (II->getNormalDest() == RetainRVParent) {
        BasicBlock::const_iterator I = RetainRVParent->begin();
        while (IsNoopInstruction(I)) ++I;
        if (&*I == RetainRV)
          return false;
      }
    }
  }

  // Check for being preceded by an objc_autoreleaseReturnValue on the same
  // pointer. In this case, we can delete the pair.
  BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
  if (I != Begin) {
    do --I; while (I != Begin && IsNoopInstruction(I));
    if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
        GetObjCArg(I) == Arg) {
      Changed = true;
      ++NumPeeps;

      DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainRVCall: Erasing " << *I << "\n"
                   << "                                  Erasing " << *RetainRV
                   << "\n");

      EraseInstruction(I);
      EraseInstruction(RetainRV);
      return true;
    }
  }

  // Turn it into a plain objc_retain.
  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainRVCall: Transforming "
                  "objc_retainAutoreleasedReturnValue => "
                  "objc_retain since the operand is not a return value.\n"
                  "                                  Old: "
               << *RetainRV << "\n");

  cast<CallInst>(RetainRV)->setCalledFunction(getRetainCallee(F.getParent()));

  DEBUG(dbgs() << "                                  New: "
               << *RetainRV << "\n");

  return false;
}
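
// OptimizeRetainRVCall example (an illustrative sketch): when the operand
// does not come from an immediately preceding call or invoke,
//
//   %y = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
//
// becomes a plain:
//
//   %y = call i8* @objc_retain(i8* %x)
//
// and an immediately preceding objc_autoreleaseReturnValue on the same
// pointer is deleted together with the retainRV.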

/// Turn objc_autoreleaseReturnValue into objc_autorelease if the result is not
/// used as a return value.
void
ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV,
                                      InstructionClass &Class) {
  // Check for a return of the pointer value.
  const Value *Ptr = GetObjCArg(AutoreleaseRV);
  SmallVector<const Value *, 2> Users;
  Users.push_back(Ptr);
  do {
    Ptr = Users.pop_back_val();
    for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
         UI != UE; ++UI) {
      const User *I = *UI;
      if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
        return;
      if (isa<BitCastInst>(I))
        Users.push_back(I);
    }
  } while (!Users.empty());

  Changed = true;
  ++NumPeeps;

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeAutoreleaseRVCall: Transforming "
                  "objc_autoreleaseReturnValue => "
                  "objc_autorelease since its operand is not used as a return "
                  "value.\n"
                  "                                       Old: "
               << *AutoreleaseRV << "\n");

  CallInst *AutoreleaseRVCI = cast<CallInst>(AutoreleaseRV);
  AutoreleaseRVCI->
    setCalledFunction(getAutoreleaseCallee(F.getParent()));
  AutoreleaseRVCI->setTailCall(false); // Never tail call objc_autorelease.
  Class = IC_Autorelease;

  DEBUG(dbgs() << "                                       New: "
               << *AutoreleaseRV << "\n");
}
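
// OptimizeAutoreleaseRVCall example (an illustrative sketch): if %x never
// reaches a return or an objc_retainAutoreleasedReturnValue,
//
//   %y = call i8* @objc_autoreleaseReturnValue(i8* %x)
//
// becomes:
//
//   %y = call i8* @objc_autorelease(i8* %x)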

// \brief Attempt to strength reduce objc_retainBlock calls to objc_retain
// calls.
//
// Specifically: If an objc_retainBlock call has the copy_on_escape metadata and
// does not escape (following the rules of block escaping), strength reduce the
// objc_retainBlock to an objc_retain.
//
// TODO: If an objc_retainBlock call is dominated by a previous
// objc_retainBlock call, strength reduce the objc_retainBlock to an
// objc_retain.
bool
ObjCARCOpt::OptimizeRetainBlockCall(Function &F, Instruction *Inst,
                                    InstructionClass &Class) {
  assert(GetBasicInstructionClass(Inst) == Class);
  assert(IC_RetainBlock == Class);

  // If we cannot optimize Inst, return false.
  if (!IsRetainBlockOptimizable(Inst))
    return false;

  CallInst *RetainBlock = cast<CallInst>(Inst);
  RetainBlock->setCalledFunction(getRetainCallee(F.getParent()));
  // Remove copy_on_escape metadata.
  RetainBlock->setMetadata(CopyOnEscapeMDKind, 0);
  Class = IC_Retain;

  return true;
}

/// Visit each call, one at a time, and make simplifications without doing any
/// additional analysis.
void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
  // Reset all the flags in preparation for recomputing them.
  UsedInThisFunction = 0;

  // Visit all objc_* calls in F.
  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
    Instruction *Inst = &*I++;

    InstructionClass Class = GetBasicInstructionClass(Inst);

    DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Visiting: Class: "
          << Class << "; " << *Inst << "\n");

    switch (Class) {
    default: break;

    // Delete no-op casts. These function calls have special semantics, but
    // the semantics are entirely implemented via lowering in the front-end,
    // so by the time they reach the optimizer, they are just no-op calls
    // which return their argument.
    //
    // There are gray areas here, as the ability to cast reference-counted
    // pointers to raw void* and back allows code to break ARC assumptions,
    // however these are currently considered to be unimportant.
    case IC_NoopCast:
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Erasing no-op cast:"
                   " " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;

    // If the pointer-to-weak-pointer is null, it's undefined behavior.
    case IC_StoreWeak:
    case IC_LoadWeak:
    case IC_LoadWeakRetained:
    case IC_InitWeak:
    case IC_DestroyWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);
        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: A null "
                        "pointer-to-weak-pointer is undefined behavior.\n"
                        "                                     Old = " << *CI <<
                        "\n                                     New = " <<
                        *NewValue << "\n");
        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_CopyWeak:
    case IC_MoveWeak: {
      CallInst *CI = cast<CallInst>(Inst);
      if (IsNullOrUndef(CI->getArgOperand(0)) ||
          IsNullOrUndef(CI->getArgOperand(1))) {
        Changed = true;
        Type *Ty = CI->getArgOperand(0)->getType();
        new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
                      Constant::getNullValue(Ty),
                      CI);

        llvm::Value *NewValue = UndefValue::get(CI->getType());
        DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: A null "
                        "pointer-to-weak-pointer is undefined behavior.\n"
                        "                                     Old = " << *CI <<
                        "\n                                     New = " <<
                        *NewValue << "\n");

        CI->replaceAllUsesWith(NewValue);
        CI->eraseFromParent();
        continue;
      }
      break;
    }
    case IC_RetainBlock:
      // If we strength reduce an objc_retainBlock to an objc_retain, continue
      // onto the objc_retain peephole optimizations. Otherwise break.
      if (!OptimizeRetainBlockCall(F, Inst, Class))
        break;
      // FALLTHROUGH
    case IC_Retain:
      OptimizeRetainCall(F, Inst);
      break;
    case IC_RetainRV:
      if (OptimizeRetainRVCall(F, Inst))
        continue;
      break;
    case IC_AutoreleaseRV:
      OptimizeAutoreleaseRVCall(F, Inst, Class);
      break;
    }

    // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
    if (IsAutorelease(Class) && Inst->use_empty()) {
      CallInst *Call = cast<CallInst>(Inst);
      const Value *Arg = Call->getArgOperand(0);
      Arg = FindSingleUseIdentifiedObject(Arg);
      if (Arg) {
        Changed = true;
        ++NumAutoreleases;

        // Create the declaration lazily.
        LLVMContext &C = Inst->getContext();
        CallInst *NewCall =
          CallInst::Create(getReleaseCallee(F.getParent()),
                           Call->getArgOperand(0), "", Call);
        NewCall->setMetadata(ImpreciseReleaseMDKind,
                             MDNode::get(C, ArrayRef<Value *>()));

        DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Replacing "
                        "objc_autorelease(x) with objc_release(x) since x is "
                        "otherwise unused.\n"
                        "                                     Old: " << *Call <<
                        "\n                                     New: " <<
                        *NewCall << "\n");

        EraseInstruction(Call);
        Inst = NewCall;
        Class = IC_Release;
      }
    }

    // For functions which can never be passed stack arguments, add
    // a tail keyword.
    if (IsAlwaysTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Adding tail keyword"
            " to function since it can never be passed stack args: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall();
    }

    // Ensure that functions that can never have a "tail" keyword due to the
    // semantics of ARC truly do not do so.
    if (IsNeverTail(Class)) {
      Changed = true;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Removing tail "
            "keyword from function: " << *Inst <<
            "\n");
      cast<CallInst>(Inst)->setTailCall(false);
    }

    // Set nounwind as needed.
    if (IsNoThrow(Class)) {
      Changed = true;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Found no throw"
            " class. Setting nounwind on: " << *Inst << "\n");
      cast<CallInst>(Inst)->setDoesNotThrow();
    }

    if (!IsNoopOnNull(Class)) {
      UsedInThisFunction |= 1 << Class;
      continue;
    }

    const Value *Arg = GetObjCArg(Inst);

    // ARC calls with null are no-ops. Delete them.
    if (IsNullOrUndef(Arg)) {
      Changed = true;
      ++NumNoops;
      DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: ARC calls with "
            " null are no-ops. Erasing: " << *Inst << "\n");
      EraseInstruction(Inst);
      continue;
    }

    // Keep track of which of retain, release, autorelease, and retain_block
    // are actually present in this function.
    UsedInThisFunction |= 1 << Class;

    // If Arg is a PHI, and one or more incoming values to the
    // PHI are null, and the call is control-equivalent to the PHI, and there
    // are no relevant side effects between the PHI and the call, the call
    // could be pushed up to just those paths with non-null incoming values.
    // For now, don't bother splitting critical edges for this.
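    //
    // For example (an illustrative sketch, not IR from an actual test case):
    //
    //   %p = phi i8* [ %x, %bb1 ], [ null, %bb2 ]
    //   call void @objc_release(i8* %p)
    //
    // can become a release of %x cloned into the end of %bb1, since the
    // release is a no-op along the null path.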
    SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
    Worklist.push_back(std::make_pair(Inst, Arg));
    do {
      std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
      Inst = Pair.first;
      Arg = Pair.second;

      const PHINode *PN = dyn_cast<PHINode>(Arg);
      if (!PN) continue;

      // Determine if the PHI has any null operands, or any incoming
      // critical edges.
      bool HasNull = false;
      bool HasCriticalEdges = false;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        Value *Incoming =
          StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
        if (IsNullOrUndef(Incoming))
          HasNull = true;
        else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
                   .getNumSuccessors() != 1) {
          HasCriticalEdges = true;
          break;
        }
      }
      // If we have null operands and no critical edges, optimize.
      if (!HasCriticalEdges && HasNull) {
        SmallPtrSet<Instruction *, 4> DependingInstructions;
        SmallPtrSet<const BasicBlock *, 4> Visited;

        // Check that there is nothing that cares about the reference
        // count between the call and the phi.
        switch (Class) {
        case IC_Retain:
        case IC_RetainBlock:
          // These can always be moved up.
          break;
        case IC_Release:
          // These can't be moved across things that care about the retain
          // count.
          FindDependencies(NeedsPositiveRetainCount, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_Autorelease:
          // These can't be moved across autorelease pool scope boundaries.
          FindDependencies(AutoreleasePoolBoundary, Arg,
                           Inst->getParent(), Inst,
                           DependingInstructions, Visited, PA);
          break;
        case IC_RetainRV:
        case IC_AutoreleaseRV:
          // Don't move these; the RV optimization depends on the autoreleaseRV
          // being tail called, and the retainRV being immediately after a call
          // (which might still happen if we get lucky with codegen layout, but
          // it's not worth taking the chance).
          continue;
        default:
          llvm_unreachable("Invalid dependence flavor");
        }

        if (DependingInstructions.size() == 1 &&
            *DependingInstructions.begin() == PN) {
          Changed = true;
          ++NumPartialNoops;
          // Clone the call into each predecessor that has a non-null value.
          CallInst *CInst = cast<CallInst>(Inst);
          Type *ParamTy = CInst->getArgOperand(0)->getType();
          for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
            Value *Incoming =
              StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
            if (!IsNullOrUndef(Incoming)) {
              CallInst *Clone = cast<CallInst>(CInst->clone());
              Value *Op = PN->getIncomingValue(i);
              Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
              if (Op->getType() != ParamTy)
                Op = new BitCastInst(Op, ParamTy, "", InsertPos);
              Clone->setArgOperand(0, Op);
              Clone->insertBefore(InsertPos);

              DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Cloning "
                           << *CInst << "\n"
                           "                                     And inserting "
                           "clone at " << *InsertPos << "\n");
              Worklist.push_back(std::make_pair(Clone, Incoming));
            }
          }
          // Erase the original call.
          DEBUG(dbgs() << "Erasing: " << *CInst << "\n");
          EraseInstruction(CInst);
          continue;
        }
      }
    } while (!Worklist.empty());
  }

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Finished List.\n");
}

/// Check for critical edges, loop boundaries, irreducible control flow, or
/// other CFG structures where moving code across the edge would result in it
/// being executed more.
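///
/// For example (an illustrative sketch): in a CFG triangle where only one of
/// a block's two successors continues a retain sequence, moving a call
/// across the branch would make it execute on a path where it previously did
/// not, so the sequence must be conservatively forgotten instead.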
void
ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
                               DenseMap<const BasicBlock *, BBState> &BBStates,
                               BBState &MyStates) const {
  // If any top-down local-use or possible-dec has a succ which is earlier in
  // the sequence, forget it.
  for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
       E = MyStates.top_down_ptr_end(); I != E; ++I)
    switch (I->second.GetSeq()) {
    default: break;
    case S_Use: {
      const Value *Arg = I->first;
      const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
      bool SomeSuccHasSame = false;
      bool AllSuccsHaveSame = true;
      PtrState &S = I->second;
      succ_const_iterator SI(TI), SE(TI, false);

      for (; SI != SE; ++SI) {
        Sequence SuccSSeq = S_None;
        bool SuccSRRIKnownSafe = false;
        // If VisitBottomUp has pointer information for this successor, take
        // what we know about it.
        DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(*SI);
        assert(BBI != BBStates.end());
        const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
        SuccSSeq = SuccS.GetSeq();
        SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
        switch (SuccSSeq) {
        case S_None:
        case S_CanRelease: {
          if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
            S.ClearSequenceProgress();
            break;
          }
          continue;
        }
        case S_Use:
          SomeSuccHasSame = true;
          break;
        case S_Stop:
        case S_Release:
        case S_MovableRelease:
          if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
            AllSuccsHaveSame = false;
          break;
        case S_Retain:
          llvm_unreachable("bottom-up pointer in retain state!");
        }
      }
      // If the state at the other end of any of the successor edges
      // matches the current state, require all edges to match. This
      // guards against loops in the middle of a sequence.
      if (SomeSuccHasSame && !AllSuccsHaveSame)
        S.ClearSequenceProgress();
      break;
    }
    case S_CanRelease: {
      const Value *Arg = I->first;
      const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
      bool SomeSuccHasSame = false;
      bool AllSuccsHaveSame = true;
      PtrState &S = I->second;
      succ_const_iterator SI(TI), SE(TI, false);

      for (; SI != SE; ++SI) {
        Sequence SuccSSeq = S_None;
        bool SuccSRRIKnownSafe = false;
        // If VisitBottomUp has pointer information for this successor, take
        // what we know about it.
        DenseMap<const BasicBlock *, BBState>::iterator BBI =
          BBStates.find(*SI);
        assert(BBI != BBStates.end());
        const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
        SuccSSeq = SuccS.GetSeq();
        SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
        switch (SuccSSeq) {
        case S_None: {
          if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
            S.ClearSequenceProgress();
            break;
          }
          continue;
        }
        case S_CanRelease:
          SomeSuccHasSame = true;
          break;
        case S_Stop:
        case S_Release:
        case S_MovableRelease:
        case S_Use:
          if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
            AllSuccsHaveSame = false;
          break;
        case S_Retain:
          llvm_unreachable("bottom-up pointer in retain state!");
        }
      }
      // If the state at the other end of any of the successor edges
      // matches the current state, require all edges to match. This
      // guards against loops in the middle of a sequence.
      if (SomeSuccHasSame && !AllSuccsHaveSame)
        S.ClearSequenceProgress();
      break;
    }
    }
}

bool
ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
                                     BasicBlock *BB,
                                     MapVector<Value *, RRInfo> &Retains,
                                     BBState &MyStates) {
  bool NestingDetected = false;
  InstructionClass Class = GetInstructionClass(Inst);
  const Value *Arg = 0;

  switch (Class) {
  case IC_Release: {
    Arg = GetObjCArg(Inst);

    PtrState &S = MyStates.getPtrBottomUpState(Arg);

    // Check for two releases in a row on the same pointer. If we see one,
    // make a note, and we'll circle back to revisit it after we've
    // hopefully eliminated the second release, which may allow us to
    // eliminate the first release too.
    // Theoretically we could implement removal of nested retain+release
    // pairs by making PtrState hold a stack of states, but this is
    // simple and avoids adding overhead for the non-nested case.
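    //
    // For example (an illustrative sketch): scanning bottom-up through
    //
    //   objc_release(%x)
    //   ...
    //   objc_release(%x)
    //
    // the visit of the earlier release finds the pointer already in a
    // release state and sets NestingDetected, requesting another pass once
    // the inner pair has hopefully been eliminated.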
1788 if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease) {
1789 DEBUG(dbgs() << "ObjCARCOpt::VisitInstructionBottomUp: Found nested "
1790 "releases (i.e. a release pair)\n");
1791 NestingDetected = true;
1794 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
1795 Sequence NewSeq = ReleaseMetadata ? S_MovableRelease : S_Release;
1796 ANNOTATE_BOTTOMUP(Inst, Arg, S.GetSeq(), NewSeq);
1797 S.ResetSequenceProgress(NewSeq);
1798 S.RRI.ReleaseMetadata = ReleaseMetadata;
1799 S.RRI.KnownSafe = S.HasKnownPositiveRefCount();
1800 S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
1801 S.RRI.Calls.insert(Inst);
1802 S.SetKnownPositiveRefCount();
1805 case IC_RetainBlock:
1806 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1807 // objc_retainBlocks to objc_retains. Thus at this point any
1808 // objc_retainBlocks that we see are not optimizable.
1812 Arg = GetObjCArg(Inst);
1814 PtrState &S = MyStates.getPtrBottomUpState(Arg);
1815 S.SetKnownPositiveRefCount();
1817 Sequence OldSeq = S.GetSeq();
1821 case S_MovableRelease:
1823 S.RRI.ReverseInsertPts.clear();
1826 // Don't do retain+release tracking for IC_RetainRV, because it's
1827 // better to let it remain as the first instruction after a call.
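// Illustrative (simplified, hypothetical IR): the return-value handshake
// only works when the retainRV immediately follows the call producing its
// argument, e.g.
//   %call = tail call i8* @foo()
//   %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)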
1828 if (Class != IC_RetainRV)
1829 Retains[Inst] = S.RRI;
1830 S.ClearSequenceProgress();
1835 llvm_unreachable("bottom-up pointer in retain state!");
1837 ANNOTATE_BOTTOMUP(Inst, Arg, OldSeq, S.GetSeq());
1838 return NestingDetected;
1840 case IC_AutoreleasepoolPop:
1841 // Conservatively, clear MyStates for all known pointers.
1842 MyStates.clearBottomUpPointers();
1843 return NestingDetected;
1844 case IC_AutoreleasepoolPush:
1846 // These are irrelevant.
1847 return NestingDetected;
1852 // Consider any other possible effects of this instruction on each
1853 // pointer being tracked.
1854 for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
1855 ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
1856 const Value *Ptr = MI->first;
1858 continue; // Handled above.
1859 PtrState &S = MI->second;
1860 Sequence Seq = S.GetSeq();
1862 // Check for possible releases.
1863 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
1864 S.ClearKnownPositiveRefCount();
1867 S.SetSeq(S_CanRelease);
1868 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S.GetSeq());
1872 case S_MovableRelease:
1877 llvm_unreachable("bottom-up pointer in retain state!");
1881 // Check for possible direct uses.
1884 case S_MovableRelease:
1885 if (CanUse(Inst, Ptr, PA, Class)) {
1886 assert(S.RRI.ReverseInsertPts.empty());
1887 // If this is an invoke instruction, we're scanning it as part of
1888 // one of its successor blocks, since we can't insert code after it
1889 // in its own block, and we don't want to split critical edges.
1890 if (isa<InvokeInst>(Inst))
1891 S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
1893 S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
1895 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
1896 } else if (Seq == S_Release && IsUser(Class)) {
1897 // Non-movable releases depend on any possible objc pointer use.
1899 ANNOTATE_BOTTOMUP(Inst, Ptr, S_Release, S_Stop);
1900 assert(S.RRI.ReverseInsertPts.empty());
1901 // As above; handle invoke specially.
1902 if (isa<InvokeInst>(Inst))
1903 S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
1905 S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
1909 if (CanUse(Inst, Ptr, PA, Class)) {
1911 ANNOTATE_BOTTOMUP(Inst, Ptr, Seq, S_Use);
1919 llvm_unreachable("bottom-up pointer in retain state!");
1923 return NestingDetected;
1927 ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
1928 DenseMap<const BasicBlock *, BBState> &BBStates,
1929 MapVector<Value *, RRInfo> &Retains) {
1930 bool NestingDetected = false;
1931 BBState &MyStates = BBStates[BB];
1933 // Merge the states from each successor to compute the initial state
1934 // for the current block.
1935 BBState::edge_iterator SI(MyStates.succ_begin()),
1936 SE(MyStates.succ_end());
1938 const BasicBlock *Succ = *SI;
1939 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
1940 assert(I != BBStates.end());
1941 MyStates.InitFromSucc(I->second);
1943 for (; SI != SE; ++SI) {
1945 I = BBStates.find(Succ);
1946 assert(I != BBStates.end());
1947 MyStates.MergeSucc(I->second);
1951 // If ARC Annotations are enabled, output the current state of pointers at the
1952 // bottom of the basic block.
1953 ANNOTATE_BOTTOMUP_BBEND(MyStates, BB);
1955 // Visit all the instructions, bottom-up.
1956 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
1957 Instruction *Inst = llvm::prior(I);
1959 // Invoke instructions are visited as part of their successors (below).
1960 if (isa<InvokeInst>(Inst))
DEBUG(dbgs() << "ObjCARCOpt::VisitBottomUp: Visiting " << *Inst << "\n");
1965 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
1968 // If there's a predecessor with an invoke, visit the invoke as if it were
1969 // part of this block, since we can't insert code after an invoke in its own
1970 // block, and we don't want to split critical edges.
1971 for (BBState::edge_iterator PI(MyStates.pred_begin()),
1972 PE(MyStates.pred_end()); PI != PE; ++PI) {
1973 BasicBlock *Pred = *PI;
1974 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
1975 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
1978 // If ARC Annotations are enabled, output the current state of pointers at the
1979 // top of the basic block.
1980 ANNOTATE_BOTTOMUP_BBSTART(MyStates, BB);
1982 return NestingDetected;
1986 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
1987 DenseMap<Value *, RRInfo> &Releases,
1988 BBState &MyStates) {
1989 bool NestingDetected = false;
1990 InstructionClass Class = GetInstructionClass(Inst);
1991 const Value *Arg = 0;
1994 case IC_RetainBlock:
1995 // In OptimizeIndividualCalls, we have strength reduced all optimizable
1996 // objc_retainBlocks to objc_retains. Thus at this point any
1997 // objc_retainBlocks that we see are not optimizable.
2001 Arg = GetObjCArg(Inst);
2003 PtrState &S = MyStates.getPtrTopDownState(Arg);
2005 // Don't do retain+release tracking for IC_RetainRV, because it's
2006 // better to let it remain as the first instruction after a call.
2007 if (Class != IC_RetainRV) {
// Check for the case where we see two retains in a row on the same
// pointer. If so, make a note, and we'll circle back to revisit it after
// we've hopefully eliminated the second retain, which may allow us to
// eliminate the first retain too.
2012 // Theoretically we could implement removal of nested retain+release
2013 // pairs by making PtrState hold a stack of states, but this is
2014 // simple and avoids adding overhead for the non-nested case.
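// The nested shape noted above, as illustrative (hypothetical) IR seen
// top-down with no intervening release:
//   %0 = call i8* @objc_retain(i8* %x)
//   ...
//   %1 = call i8* @objc_retain(i8* %x)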
2015 if (S.GetSeq() == S_Retain)
2016 NestingDetected = true;
2018 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_Retain);
2019 S.ResetSequenceProgress(S_Retain);
2020 S.RRI.KnownSafe = S.HasKnownPositiveRefCount();
2021 S.RRI.Calls.insert(Inst);
2024 S.SetKnownPositiveRefCount();
// A retain can be a potential use; proceed to the generic checking
// code below.
2031 Arg = GetObjCArg(Inst);
2033 PtrState &S = MyStates.getPtrTopDownState(Arg);
2034 S.ClearKnownPositiveRefCount();
2036 switch (S.GetSeq()) {
2039 S.RRI.ReverseInsertPts.clear();
2042 S.RRI.ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
2043 S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
2044 Releases[Inst] = S.RRI;
2045 ANNOTATE_TOPDOWN(Inst, Arg, S.GetSeq(), S_None);
2046 S.ClearSequenceProgress();
2052 case S_MovableRelease:
2053 llvm_unreachable("top-down pointer in release state!");
2057 case IC_AutoreleasepoolPop:
2058 // Conservatively, clear MyStates for all known pointers.
2059 MyStates.clearTopDownPointers();
2060 return NestingDetected;
2061 case IC_AutoreleasepoolPush:
2063 // These are irrelevant.
2064 return NestingDetected;
2069 // Consider any other possible effects of this instruction on each
2070 // pointer being tracked.
2071 for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
2072 ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
2073 const Value *Ptr = MI->first;
2075 continue; // Handled above.
2076 PtrState &S = MI->second;
2077 Sequence Seq = S.GetSeq();
2079 // Check for possible releases.
2080 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2081 S.ClearKnownPositiveRefCount();
2084 S.SetSeq(S_CanRelease);
2085 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_CanRelease);
2086 assert(S.RRI.ReverseInsertPts.empty());
2087 S.RRI.ReverseInsertPts.insert(Inst);
// One call can't cause a transition from S_Retain to S_CanRelease
// and S_CanRelease to S_Use. If we've made the first transition,
// we're done.
2099 case S_MovableRelease:
2100 llvm_unreachable("top-down pointer in release state!");
2104 // Check for possible direct uses.
2107 if (CanUse(Inst, Ptr, PA, Class)) {
2109 ANNOTATE_TOPDOWN(Inst, Ptr, Seq, S_Use);
2118 case S_MovableRelease:
2119 llvm_unreachable("top-down pointer in release state!");
2123 return NestingDetected;
2127 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
2128 DenseMap<const BasicBlock *, BBState> &BBStates,
2129 DenseMap<Value *, RRInfo> &Releases) {
2130 bool NestingDetected = false;
2131 BBState &MyStates = BBStates[BB];
2133 // Merge the states from each predecessor to compute the initial state
2134 // for the current block.
2135 BBState::edge_iterator PI(MyStates.pred_begin()),
2136 PE(MyStates.pred_end());
2138 const BasicBlock *Pred = *PI;
2139 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
2140 assert(I != BBStates.end());
2141 MyStates.InitFromPred(I->second);
2143 for (; PI != PE; ++PI) {
2145 I = BBStates.find(Pred);
2146 assert(I != BBStates.end());
2147 MyStates.MergePred(I->second);
2151 // If ARC Annotations are enabled, output the current state of pointers at the
2152 // top of the basic block.
2153 ANNOTATE_TOPDOWN_BBSTART(MyStates, BB);
2155 // Visit all the instructions, top-down.
2156 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
2157 Instruction *Inst = I;
2159 DEBUG(dbgs() << "ObjCARCOpt::VisitTopDown: Visiting " << *Inst << "\n");
2161 NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
2164 // If ARC Annotations are enabled, output the current state of pointers at the
2165 // bottom of the basic block.
2166 ANNOTATE_TOPDOWN_BBEND(MyStates, BB);
2168 CheckForCFGHazards(BB, BBStates, MyStates);
2169 return NestingDetected;
2173 ComputePostOrders(Function &F,
2174 SmallVectorImpl<BasicBlock *> &PostOrder,
2175 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
2176 unsigned NoObjCARCExceptionsMDKind,
2177 DenseMap<const BasicBlock *, BBState> &BBStates) {
2178 /// The visited set, for doing DFS walks.
2179 SmallPtrSet<BasicBlock *, 16> Visited;
2181 // Do DFS, computing the PostOrder.
2182 SmallPtrSet<BasicBlock *, 16> OnStack;
2183 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
2185 // Functions always have exactly one entry block, and we don't have
2186 // any other block that we treat like an entry block.
2187 BasicBlock *EntryBB = &F.getEntryBlock();
2188 BBState &MyStates = BBStates[EntryBB];
2189 MyStates.SetAsEntry();
2190 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
2191 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
2192 Visited.insert(EntryBB);
2193 OnStack.insert(EntryBB);
2196 BasicBlock *CurrBB = SuccStack.back().first;
2197 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
2198 succ_iterator SE(TI, false);
2200 while (SuccStack.back().second != SE) {
2201 BasicBlock *SuccBB = *SuccStack.back().second++;
2202 if (Visited.insert(SuccBB)) {
2203 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
2204 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
2205 BBStates[CurrBB].addSucc(SuccBB);
2206 BBState &SuccStates = BBStates[SuccBB];
2207 SuccStates.addPred(CurrBB);
2208 OnStack.insert(SuccBB);
2212 if (!OnStack.count(SuccBB)) {
2213 BBStates[CurrBB].addSucc(SuccBB);
2214 BBStates[SuccBB].addPred(CurrBB);
2217 OnStack.erase(CurrBB);
2218 PostOrder.push_back(CurrBB);
2219 SuccStack.pop_back();
2220 } while (!SuccStack.empty());
2224 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
// Functions may have many exits, and there are also blocks which we treat
// as exits due to ignored edges.
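// For instance (one reading of the DFS above, not an exhaustive list): a
// block ending in 'ret' or 'unreachable', or one whose only successor
// edges were back edges skipped by the forward DFS, has no recorded
// successors and so is treated as an exit here.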
2227 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
2228 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
2229 BasicBlock *ExitBB = I;
2230 BBState &MyStates = BBStates[ExitBB];
2231 if (!MyStates.isExit())
2234 MyStates.SetAsExit();
2236 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
2237 Visited.insert(ExitBB);
2238 while (!PredStack.empty()) {
2239 reverse_dfs_next_succ:
2240 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
2241 while (PredStack.back().second != PE) {
2242 BasicBlock *BB = *PredStack.back().second++;
2243 if (Visited.insert(BB)) {
2244 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
2245 goto reverse_dfs_next_succ;
2248 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
2253 // Visit the function both top-down and bottom-up.
2255 ObjCARCOpt::Visit(Function &F,
2256 DenseMap<const BasicBlock *, BBState> &BBStates,
2257 MapVector<Value *, RRInfo> &Retains,
2258 DenseMap<Value *, RRInfo> &Releases) {
2260 // Use reverse-postorder traversals, because we magically know that loops
2261 // will be well behaved, i.e. they won't repeatedly call retain on a single
2262 // pointer without doing a release. We can't use the ReversePostOrderTraversal
2263 // class here because we want the reverse-CFG postorder to consider each
2264 // function exit point, and we want to ignore selected cycle edges.
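// Concretely (a summary of the orders used below, not additional logic):
// walking ReverseCFGPostOrder in reverse visits each block after its CFG
// successors (bottom-up), and walking PostOrder in reverse visits each
// block after its CFG predecessors (top-down), modulo the ignored edges.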
2265 SmallVector<BasicBlock *, 16> PostOrder;
2266 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
2267 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
2268 NoObjCARCExceptionsMDKind,
2271 // Use reverse-postorder on the reverse CFG for bottom-up.
2272 bool BottomUpNestingDetected = false;
2273 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2274 ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
2276 BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
2278 // Use reverse-postorder for top-down.
2279 bool TopDownNestingDetected = false;
2280 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
2281 PostOrder.rbegin(), E = PostOrder.rend();
2283 TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
2285 return TopDownNestingDetected && BottomUpNestingDetected;
2288 /// Move the calls in RetainsToMove and ReleasesToMove.
2289 void ObjCARCOpt::MoveCalls(Value *Arg,
2290 RRInfo &RetainsToMove,
2291 RRInfo &ReleasesToMove,
2292 MapVector<Value *, RRInfo> &Retains,
2293 DenseMap<Value *, RRInfo> &Releases,
2294 SmallVectorImpl<Instruction *> &DeadInsts,
2296 Type *ArgTy = Arg->getType();
2297 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
2299 // Insert the new retain and release calls.
2300 for (SmallPtrSet<Instruction *, 2>::const_iterator
2301 PI = ReleasesToMove.ReverseInsertPts.begin(),
2302 PE = ReleasesToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2303 Instruction *InsertPt = *PI;
2304 Value *MyArg = ArgTy == ParamTy ? Arg :
2305 new BitCastInst(Arg, ParamTy, "", InsertPt);
2307 CallInst::Create(getRetainCallee(M), MyArg, "", InsertPt);
2308 Call->setDoesNotThrow();
2309 Call->setTailCall();
DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Inserting new Retain: " << *Call
2313 " At insertion point: " << *InsertPt
2316 for (SmallPtrSet<Instruction *, 2>::const_iterator
2317 PI = RetainsToMove.ReverseInsertPts.begin(),
2318 PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
2319 Instruction *InsertPt = *PI;
2320 Value *MyArg = ArgTy == ParamTy ? Arg :
2321 new BitCastInst(Arg, ParamTy, "", InsertPt);
2322 CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
2324 // Attach a clang.imprecise_release metadata tag, if appropriate.
2325 if (MDNode *M = ReleasesToMove.ReleaseMetadata)
2326 Call->setMetadata(ImpreciseReleaseMDKind, M);
2327 Call->setDoesNotThrow();
2328 if (ReleasesToMove.IsTailCallRelease)
2329 Call->setTailCall();
DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Inserting new Release: " << *Call
2333 " At insertion point: " << *InsertPt
2337 // Delete the original retain and release calls.
2338 for (SmallPtrSet<Instruction *, 2>::const_iterator
2339 AI = RetainsToMove.Calls.begin(),
2340 AE = RetainsToMove.Calls.end(); AI != AE; ++AI) {
2341 Instruction *OrigRetain = *AI;
2342 Retains.blot(OrigRetain);
2343 DeadInsts.push_back(OrigRetain);
2344 DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Deleting retain: " << *OrigRetain <<
2347 for (SmallPtrSet<Instruction *, 2>::const_iterator
2348 AI = ReleasesToMove.Calls.begin(),
2349 AE = ReleasesToMove.Calls.end(); AI != AE; ++AI) {
2350 Instruction *OrigRelease = *AI;
2351 Releases.erase(OrigRelease);
2352 DeadInsts.push_back(OrigRelease);
2353 DEBUG(dbgs() << "ObjCARCOpt::MoveCalls: Deleting release: " << *OrigRelease
2359 ObjCARCOpt::ConnectTDBUTraversals(DenseMap<const BasicBlock *, BBState>
2361 MapVector<Value *, RRInfo> &Retains,
2362 DenseMap<Value *, RRInfo> &Releases,
2364 SmallVector<Instruction *, 4> &NewRetains,
2365 SmallVector<Instruction *, 4> &NewReleases,
2366 SmallVector<Instruction *, 8> &DeadInsts,
2367 RRInfo &RetainsToMove,
2368 RRInfo &ReleasesToMove,
2371 bool &AnyPairsCompletelyEliminated) {
2372 // If a pair happens in a region where it is known that the reference count
2373 // is already incremented, we can similarly ignore possible decrements.
2374 bool KnownSafeTD = true, KnownSafeBU = true;
// Connect the dots between the bottom-up-collected RetainsToMove and
// top-down-collected ReleasesToMove to form sets of related calls.
2378 // This is an iterative process so that we connect multiple releases
2379 // to multiple retains if needed.
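// A hypothetical illustration: if one retain reaches a release on each of
// two CFG paths, the forward pass below pulls in both releases; if one of
// those releases is in turn paired with a second retain (say, in a loop),
// the "back the other way" pass pulls in that retain too, and the sets
// keep growing until they reach a fixed point.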
2380 unsigned OldDelta = 0;
2381 unsigned NewDelta = 0;
2382 unsigned OldCount = 0;
2383 unsigned NewCount = 0;
2384 bool FirstRelease = true;
2386 for (SmallVectorImpl<Instruction *>::const_iterator
2387 NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
2388 Instruction *NewRetain = *NI;
2389 MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
2390 assert(It != Retains.end());
2391 const RRInfo &NewRetainRRI = It->second;
2392 KnownSafeTD &= NewRetainRRI.KnownSafe;
2393 for (SmallPtrSet<Instruction *, 2>::const_iterator
2394 LI = NewRetainRRI.Calls.begin(),
2395 LE = NewRetainRRI.Calls.end(); LI != LE; ++LI) {
2396 Instruction *NewRetainRelease = *LI;
2397 DenseMap<Value *, RRInfo>::const_iterator Jt =
2398 Releases.find(NewRetainRelease);
2399 if (Jt == Releases.end())
2401 const RRInfo &NewRetainReleaseRRI = Jt->second;
2402 assert(NewRetainReleaseRRI.Calls.count(NewRetain));
2403 if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
2405 BBStates[NewRetainRelease->getParent()].GetAllPathCount();
2407 // Merge the ReleaseMetadata and IsTailCallRelease values.
2409 ReleasesToMove.ReleaseMetadata =
2410 NewRetainReleaseRRI.ReleaseMetadata;
2411 ReleasesToMove.IsTailCallRelease =
2412 NewRetainReleaseRRI.IsTailCallRelease;
2413 FirstRelease = false;
2415 if (ReleasesToMove.ReleaseMetadata !=
2416 NewRetainReleaseRRI.ReleaseMetadata)
2417 ReleasesToMove.ReleaseMetadata = 0;
2418 if (ReleasesToMove.IsTailCallRelease !=
2419 NewRetainReleaseRRI.IsTailCallRelease)
2420 ReleasesToMove.IsTailCallRelease = false;
2423 // Collect the optimal insertion points.
2425 for (SmallPtrSet<Instruction *, 2>::const_iterator
2426 RI = NewRetainReleaseRRI.ReverseInsertPts.begin(),
2427 RE = NewRetainReleaseRRI.ReverseInsertPts.end();
2429 Instruction *RIP = *RI;
2430 if (ReleasesToMove.ReverseInsertPts.insert(RIP))
2431 NewDelta -= BBStates[RIP->getParent()].GetAllPathCount();
2433 NewReleases.push_back(NewRetainRelease);
2438 if (NewReleases.empty()) break;
2440 // Back the other way.
2441 for (SmallVectorImpl<Instruction *>::const_iterator
2442 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
2443 Instruction *NewRelease = *NI;
2444 DenseMap<Value *, RRInfo>::const_iterator It =
2445 Releases.find(NewRelease);
2446 assert(It != Releases.end());
2447 const RRInfo &NewReleaseRRI = It->second;
2448 KnownSafeBU &= NewReleaseRRI.KnownSafe;
2449 for (SmallPtrSet<Instruction *, 2>::const_iterator
2450 LI = NewReleaseRRI.Calls.begin(),
2451 LE = NewReleaseRRI.Calls.end(); LI != LE; ++LI) {
2452 Instruction *NewReleaseRetain = *LI;
2453 MapVector<Value *, RRInfo>::const_iterator Jt =
2454 Retains.find(NewReleaseRetain);
2455 if (Jt == Retains.end())
2457 const RRInfo &NewReleaseRetainRRI = Jt->second;
2458 assert(NewReleaseRetainRRI.Calls.count(NewRelease));
2459 if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
2460 unsigned PathCount =
2461 BBStates[NewReleaseRetain->getParent()].GetAllPathCount();
2462 OldDelta += PathCount;
2463 OldCount += PathCount;
2465 // Collect the optimal insertion points.
2467 for (SmallPtrSet<Instruction *, 2>::const_iterator
2468 RI = NewReleaseRetainRRI.ReverseInsertPts.begin(),
2469 RE = NewReleaseRetainRRI.ReverseInsertPts.end();
2471 Instruction *RIP = *RI;
2472 if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
2473 PathCount = BBStates[RIP->getParent()].GetAllPathCount();
2474 NewDelta += PathCount;
2475 NewCount += PathCount;
2478 NewRetains.push_back(NewReleaseRetain);
2482 NewReleases.clear();
2483 if (NewRetains.empty()) break;
2486 // If the pointer is known incremented or nested, we can safely delete the
2487 // pair regardless of what's between them.
2488 if (KnownSafeTD || KnownSafeBU) {
2489 RetainsToMove.ReverseInsertPts.clear();
2490 ReleasesToMove.ReverseInsertPts.clear();
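// With the insertion points cleared, MoveCalls will create no new calls;
// the net effect is outright deletion of the pair rather than code motion.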
2493 // Determine whether the new insertion points we computed preserve the
2494 // balance of retain and release calls through the program.
2495 // TODO: If the fully aggressive solution isn't valid, try to find a
2496 // less aggressive solution which is.
// Determine whether the original call points are balanced in the retain
// and release calls through the program. If not, conservatively don't
// touch them.
2504 // TODO: It's theoretically possible to do code motion in this case, as
2505 // long as the existing imbalances are maintained.
2510 assert(OldCount != 0 && "Unreachable code?");
2511 NumRRs += OldCount - NewCount;
2512 // Set to true if we completely removed any RR pairs.
2513 AnyPairsCompletelyEliminated = NewCount == 0;
2515 // We can move calls!
/// Identify pairings between the retains and releases, and delete and/or
/// move them.
2522 ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
2524 MapVector<Value *, RRInfo> &Retains,
2525 DenseMap<Value *, RRInfo> &Releases,
2527 bool AnyPairsCompletelyEliminated = false;
2528 RRInfo RetainsToMove;
2529 RRInfo ReleasesToMove;
2530 SmallVector<Instruction *, 4> NewRetains;
2531 SmallVector<Instruction *, 4> NewReleases;
2532 SmallVector<Instruction *, 8> DeadInsts;
2534 // Visit each retain.
2535 for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
2536 E = Retains.end(); I != E; ++I) {
2537 Value *V = I->first;
2538 if (!V) continue; // blotted
2540 Instruction *Retain = cast<Instruction>(V);
2542 DEBUG(dbgs() << "ObjCARCOpt::PerformCodePlacement: Visiting: " << *Retain
2545 Value *Arg = GetObjCArg(Retain);
2547 // If the object being released is in static or stack storage, we know it's
2548 // not being managed by ObjC reference counting, so we can delete pairs
2549 // regardless of what possible decrements or uses lie between them.
2550 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
2552 // A constant pointer can't be pointing to an object on the heap. It may
2553 // be reference-counted, but it won't be deleted.
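// An illustrative (hypothetical) case: a pointer loaded from a constant
// global, e.g. a compiler-emitted constant string object,
//   %obj = load i8** @SomeConstantGlobal   ; where GV->isConstant()
// may be retained and released but will never be deallocated, so the
// pairs on it are removable.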
2554 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
2555 if (const GlobalVariable *GV =
2556 dyn_cast<GlobalVariable>(
2557 StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
2558 if (GV->isConstant())
// Connect the dots between the bottom-up-collected RetainsToMove and
// top-down-collected ReleasesToMove to form sets of related calls.
2563 NewRetains.push_back(Retain);
2564 bool PerformMoveCalls =
2565 ConnectTDBUTraversals(BBStates, Retains, Releases, M, NewRetains,
2566 NewReleases, DeadInsts, RetainsToMove,
2567 ReleasesToMove, Arg, KnownSafe,
2568 AnyPairsCompletelyEliminated);
2570 #ifdef ARC_ANNOTATIONS
// Do not move calls if ARC annotations are requested. If we were to move
// calls here, the emitted annotations would no longer describe the
// instructions they were attached to.
2573 PerformMoveCalls = PerformMoveCalls && !EnableARCAnnotations;
2574 #endif // ARC_ANNOTATIONS
2576 if (PerformMoveCalls) {
// Ok, everything checks out and we're all set. Let's move/delete some
// code!
2579 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
2580 Retains, Releases, DeadInsts, M);
2583 // Clean up state for next retain.
2584 NewReleases.clear();
2586 RetainsToMove.clear();
2587 ReleasesToMove.clear();
2590 // Now that we're done moving everything, we can delete the newly dead
2591 // instructions, as we no longer need them as insert points.
2592 while (!DeadInsts.empty())
2593 EraseInstruction(DeadInsts.pop_back_val());
2595 return AnyPairsCompletelyEliminated;
2598 /// Weak pointer optimizations.
2599 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
// First, do memdep-style RLE (redundant load elimination) and S2L
// (store-to-load forwarding) optimizations. We can't use memdep itself
// because it uses AliasAnalysis and we need to do provenance queries
// instead.
2603 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2604 Instruction *Inst = &*I++;
2606 DEBUG(dbgs() << "ObjCARCOpt::OptimizeWeakCalls: Visiting: " << *Inst <<
2609 InstructionClass Class = GetBasicInstructionClass(Inst);
2610 if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
2613 // Delete objc_loadWeak calls with no users.
2614 if (Class == IC_LoadWeak && Inst->use_empty()) {
2615 Inst->eraseFromParent();
2619 // TODO: For now, just look for an earlier available version of this value
2620 // within the same block. Theoretically, we could do memdep-style non-local
2621 // analysis too, but that would want caching. A better approach would be to
2622 // use the technique that EarlyCSE uses.
2623 inst_iterator Current = llvm::prior(I);
2624 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
2625 for (BasicBlock::iterator B = CurrentBB->begin(),
2626 J = Current.getInstructionIterator();
2628 Instruction *EarlierInst = &*llvm::prior(J);
2629 InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
2630 switch (EarlierClass) {
2632 case IC_LoadWeakRetained: {
// If this is loading from the same pointer, replace this load's value
// with that one.
2635 CallInst *Call = cast<CallInst>(Inst);
2636 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2637 Value *Arg = Call->getArgOperand(0);
2638 Value *EarlierArg = EarlierCall->getArgOperand(0);
2639 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2640 case AliasAnalysis::MustAlias:
2642 // If the load has a builtin retain, insert a plain retain for it.
2643 if (Class == IC_LoadWeakRetained) {
2645 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
2649 // Zap the fully redundant load.
2650 Call->replaceAllUsesWith(EarlierCall);
2651 Call->eraseFromParent();
2653 case AliasAnalysis::MayAlias:
2654 case AliasAnalysis::PartialAlias:
2656 case AliasAnalysis::NoAlias:
// If this is storing to the same pointer and has the same size etc.,
// replace this load's value with the stored value.
2665 CallInst *Call = cast<CallInst>(Inst);
2666 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
2667 Value *Arg = Call->getArgOperand(0);
2668 Value *EarlierArg = EarlierCall->getArgOperand(0);
2669 switch (PA.getAA()->alias(Arg, EarlierArg)) {
2670 case AliasAnalysis::MustAlias:
2672 // If the load has a builtin retain, insert a plain retain for it.
2673 if (Class == IC_LoadWeakRetained) {
2675 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
2679 // Zap the fully redundant load.
2680 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
2681 Call->eraseFromParent();
2683 case AliasAnalysis::MayAlias:
2684 case AliasAnalysis::PartialAlias:
2686 case AliasAnalysis::NoAlias:
// TODO: Grab the copied value.
2695 case IC_AutoreleasepoolPush:
2697 case IC_IntrinsicUser:
2699 // Weak pointers are only modified through the weak entry points
2700 // (and arbitrary calls, which could call the weak entry points).
2703 // Anything else could modify the weak pointer.
2710 // Then, for each destroyWeak with an alloca operand, check to see if
2711 // the alloca and all its users can be zapped.
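// An illustrative (hypothetical) instance of the removable pattern:
//   %w = alloca i8*
//   %0 = call i8* @objc_initWeak(i8** %w, i8* %obj)
//   call void @objc_destroyWeak(i8** %w)
// If the alloca's only users are weak entry points like these, the calls
// and the alloca itself are deleted below.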
2712 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2713 Instruction *Inst = &*I++;
2714 InstructionClass Class = GetBasicInstructionClass(Inst);
2715 if (Class != IC_DestroyWeak)
2718 CallInst *Call = cast<CallInst>(Inst);
2719 Value *Arg = Call->getArgOperand(0);
2720 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
2721 for (Value::use_iterator UI = Alloca->use_begin(),
2722 UE = Alloca->use_end(); UI != UE; ++UI) {
2723 const Instruction *UserInst = cast<Instruction>(*UI);
2724 switch (GetBasicInstructionClass(UserInst)) {
2727 case IC_DestroyWeak:
2734 for (Value::use_iterator UI = Alloca->use_begin(),
2735 UE = Alloca->use_end(); UI != UE; ) {
2736 CallInst *UserInst = cast<CallInst>(*UI++);
2737 switch (GetBasicInstructionClass(UserInst)) {
2740 // These functions return their second argument.
2741 UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
2743 case IC_DestroyWeak:
2747 llvm_unreachable("alloca really is used!");
2749 UserInst->eraseFromParent();
2751 Alloca->eraseFromParent();
2756 DEBUG(dbgs() << "ObjCARCOpt::OptimizeWeakCalls: Finished List.\n\n");
/// Identify program paths which execute sequences of retains and releases
/// that can be eliminated.
2762 bool ObjCARCOpt::OptimizeSequences(Function &F) {
2763 /// Releases, Retains - These are used to store the results of the main flow
2764 /// analysis. These use Value* as the key instead of Instruction* so that the
2765 /// map stays valid when we get around to rewriting code and calls get
2766 /// replaced by arguments.
2767 DenseMap<Value *, RRInfo> Releases;
2768 MapVector<Value *, RRInfo> Retains;
2770 /// This is used during the traversal of the function to track the
2771 /// states for each identified object at each block.
2772 DenseMap<const BasicBlock *, BBState> BBStates;
2774 // Analyze the CFG of the function, and all instructions.
2775 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
2778 return PerformCodePlacement(BBStates, Retains, Releases, F.getParent()) &&
// Check whether there is an earlier call that produces this pointer, with
// nothing in between it and the Retain that can affect the reference count
// of their shared pointer argument. Note that Retain need not be in BB.
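// The shape being matched, as illustrative (hypothetical) IR:
//   %call = call i8* @something(...)
//   ; ... nothing that can decrement the reference count ...
//   %0 = call i8* @objc_retain(i8* %call)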
2786 HasSafePathToPredecessorCall(const Value *Arg, Instruction *Retain,
2787 SmallPtrSet<Instruction *, 4> &DepInsts,
2788 SmallPtrSet<const BasicBlock *, 4> &Visited,
2789 ProvenanceAnalysis &PA) {
2790 FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
2791 DepInsts, Visited, PA);
2792 if (DepInsts.size() != 1)
2796 dyn_cast_or_null<CallInst>(*DepInsts.begin());
2798 // Check that the pointer is the return value of the call.
2799 if (!Call || Arg != Call)
2802 // Check that the call is a regular call.
2803 InstructionClass Class = GetBasicInstructionClass(Call);
2804 if (Class != IC_CallOrUser && Class != IC_Call)
2810 /// Look for this pattern:
2812 /// %call = call i8* @something(...)
2813 /// %2 = call i8* @objc_retain(i8* %call)
2814 /// %3 = call i8* @objc_autorelease(i8* %2)
2817 /// And delete the retain and autorelease.
2818 void ObjCARCOpt::OptimizeReturns(Function &F) {
2819 if (!F.getReturnType()->isPointerTy())
2822 SmallPtrSet<Instruction *, 4> DependingInstructions;
2823 SmallPtrSet<const BasicBlock *, 4> Visited;
2824 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
2825 BasicBlock *BB = FI;
2826 ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
2828 DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Visiting: " << *Ret << "\n");
2832 const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
2833 FindDependencies(NeedsPositiveRetainCount, Arg,
2834 BB, Ret, DependingInstructions, Visited, PA);
2835 if (DependingInstructions.size() != 1)
2839 CallInst *Autorelease =
2840 dyn_cast_or_null<CallInst>(*DependingInstructions.begin());
2843 InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
2844 if (!IsAutorelease(AutoreleaseClass))
2846 if (GetObjCArg(Autorelease) != Arg)
2849 DependingInstructions.clear();
2852 // Check that there is nothing that can affect the reference
2853 // count between the autorelease and the retain.
2854 FindDependencies(CanChangeRetainCount, Arg,
2855 BB, Autorelease, DependingInstructions, Visited, PA);
2856 if (DependingInstructions.size() != 1)
2861 dyn_cast_or_null<CallInst>(*DependingInstructions.begin());
2863 // Check that we found a retain with the same argument.
2865 !IsRetain(GetBasicInstructionClass(Retain)) ||
2866 GetObjCArg(Retain) != Arg)
2869 DependingInstructions.clear();
2872 // Check that there is nothing that can affect the reference count
2873 // between the retain and the call. Note that Retain need not be in BB.
2874 if (HasSafePathToPredecessorCall(Arg, Retain, DependingInstructions,
2876 // If so, we can zap the retain and autorelease.
2879 DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Erasing: " << *Retain
2881 << *Autorelease << "\n");
2882 EraseInstruction(Retain);
2883 EraseInstruction(Autorelease);
2889 DependingInstructions.clear();
2893 DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Finished List.\n\n");
2897 bool ObjCARCOpt::doInitialization(Module &M) {
2901 // If nothing in the Module uses ARC, don't do anything.
2902 Run = ModuleHasARC(M);
2906 // Identify the imprecise release metadata kind.
2907 ImpreciseReleaseMDKind =
2908 M.getContext().getMDKindID("clang.imprecise_release");
2909 CopyOnEscapeMDKind =
2910 M.getContext().getMDKindID("clang.arc.copy_on_escape");
2911 NoObjCARCExceptionsMDKind =
2912 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
2913 #ifdef ARC_ANNOTATIONS
2914 ARCAnnotationBottomUpMDKind =
2915 M.getContext().getMDKindID("llvm.arc.annotation.bottomup");
2916 ARCAnnotationTopDownMDKind =
2917 M.getContext().getMDKindID("llvm.arc.annotation.topdown");
2918 ARCAnnotationProvenanceSourceMDKind =
2919 M.getContext().getMDKindID("llvm.arc.annotation.provenancesource");
2920 #endif // ARC_ANNOTATIONS
// Intuitively, objc_retain and others are nocapture; however, in practice
// they are not, because they return their argument value. And objc_release
// calls finalizers which can have arbitrary side effects.
2926 // These are initialized lazily.
2928 AutoreleaseRVCallee = 0;
2931 RetainBlockCallee = 0;
2932 AutoreleaseCallee = 0;
2937 bool ObjCARCOpt::runOnFunction(Function &F) {
2941 // If nothing in the Module uses ARC, don't do anything.
2947 DEBUG(dbgs() << "ObjCARCOpt: Visiting Function: " << F.getName() << "\n");
2949 PA.setAA(&getAnalysis<AliasAnalysis>());
2951 // This pass performs several distinct transformations. As a compile-time aid
2952 // when compiling code that isn't ObjC, skip these if the relevant ObjC
2953 // library functions aren't declared.
// Preliminary optimizations. This also computes UsedInThisFunction.
2956 OptimizeIndividualCalls(F);
2958 // Optimizations for weak pointers.
2959 if (UsedInThisFunction & ((1 << IC_LoadWeak) |
2960 (1 << IC_LoadWeakRetained) |
2961 (1 << IC_StoreWeak) |
2962 (1 << IC_InitWeak) |
2963 (1 << IC_CopyWeak) |
2964 (1 << IC_MoveWeak) |
2965 (1 << IC_DestroyWeak)))
2966 OptimizeWeakCalls(F);
2968 // Optimizations for retain+release pairs.
2969 if (UsedInThisFunction & ((1 << IC_Retain) |
2970 (1 << IC_RetainRV) |
2971 (1 << IC_RetainBlock)))
2972 if (UsedInThisFunction & (1 << IC_Release))
2973 // Run OptimizeSequences until it either stops making changes or
2974 // no retain+release pair nesting is detected.
2975 while (OptimizeSequences(F)) {}
2977 // Optimizations if objc_autorelease is used.
2978 if (UsedInThisFunction & ((1 << IC_Autorelease) |
2979 (1 << IC_AutoreleaseRV)))
2982 DEBUG(dbgs() << "\n");
2987 void ObjCARCOpt::releaseMemory() {