//===- ObjCARC.cpp - ObjC ARC Optimization --------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file defines ObjC ARC optimizations. ARC stands for
// Automatic Reference Counting and is a system for managing reference counts
// for objects in Objective C.
//
// The optimizations performed include elimination of redundant, partially
// redundant, and inconsequential reference count operations, elimination of
// redundant weak pointer operations, pattern-matching and replacement of
// low-level operations into higher-level operations, and numerous minor
// simplifications.
//
// This file also defines a simple ARC-aware AliasAnalysis.
//
// WARNING: This file knows about certain library functions. It recognizes them
// by name, and hardwires knowledge of their semantics.
//
// WARNING: This file knows about how certain Objective-C library functions are
// used. Naive LLVM IR transformations which would otherwise be
// behavior-preserving may break these assumptions.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "objc-arc"
#include "llvm/ADT/DenseMap.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
using namespace llvm;

// A handy option to enable/disable all optimizations in this file.
static cl::opt<bool> EnableARCOpts("enable-objc-arc-opts", cl::init(true));

//===----------------------------------------------------------------------===//
// Misc. Utilities
//===----------------------------------------------------------------------===//

namespace {
  /// MapVector - An associative container with fast insertion-order
  /// (deterministic) iteration over its elements. Plus the special
  /// blot operation.
  template<class KeyT, class ValueT>
  class MapVector {
    /// Map - Map keys to indices in Vector.
    typedef DenseMap<KeyT, size_t> MapTy;
    MapTy Map;

    /// Vector - Keys and values.
    typedef std::vector<std::pair<KeyT, ValueT> > VectorTy;
    VectorTy Vector;

  public:
    typedef typename VectorTy::iterator iterator;
    typedef typename VectorTy::const_iterator const_iterator;
    iterator begin() { return Vector.begin(); }
    iterator end() { return Vector.end(); }
    const_iterator begin() const { return Vector.begin(); }
    const_iterator end() const { return Vector.end(); }

#ifdef XDEBUG
    ~MapVector() {
      assert(Vector.size() >= Map.size()); // May differ due to blotting.
      for (typename MapTy::const_iterator I = Map.begin(), E = Map.end();
           I != E; ++I) {
        assert(I->second < Vector.size());
        assert(Vector[I->second].first == I->first);
      }
      for (typename VectorTy::const_iterator I = Vector.begin(),
           E = Vector.end(); I != E; ++I)
        assert(!I->first ||
               (Map.count(I->first) &&
                Map[I->first] == size_t(I - Vector.begin())));
    }
#endif

    ValueT &operator[](const KeyT &Arg) {
      std::pair<typename MapTy::iterator, bool> Pair =
        Map.insert(std::make_pair(Arg, size_t(0)));
      if (Pair.second) {
        size_t Num = Vector.size();
        Pair.first->second = Num;
        Vector.push_back(std::make_pair(Arg, ValueT()));
        return Vector[Num].second;
      }
      return Vector[Pair.first->second].second;
    }

    std::pair<iterator, bool>
    insert(const std::pair<KeyT, ValueT> &InsertPair) {
      std::pair<typename MapTy::iterator, bool> Pair =
        Map.insert(std::make_pair(InsertPair.first, size_t(0)));
      if (Pair.second) {
        size_t Num = Vector.size();
        Pair.first->second = Num;
        Vector.push_back(InsertPair);
        return std::make_pair(Vector.begin() + Num, true);
      }
      return std::make_pair(Vector.begin() + Pair.first->second, false);
    }

    const_iterator find(const KeyT &Key) const {
      typename MapTy::const_iterator It = Map.find(Key);
      if (It == Map.end()) return Vector.end();
      return Vector.begin() + It->second;
    }

    /// blot - This is similar to erase, but instead of removing the element
    /// from the vector, it just zeros out the key in the vector. This leaves
    /// iterators intact, but clients must be prepared for zeroed-out keys when
    /// iterating.
    void blot(const KeyT &Key) {
      typename MapTy::iterator It = Map.find(Key);
      if (It == Map.end()) return;
      Vector[It->second].first = KeyT();
      Map.erase(It);
    }

    void clear() {
      Map.clear();
      Vector.clear();
    }
  };
}
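
// A minimal usage sketch of blot (illustrative only, not part of the pass):
//   MapVector<const Value *, unsigned> MV;
//   MV[P] = 1;     // insert an entry for P
//   MV.blot(P);    // the key is zeroed in place; iterators stay valid
// Afterwards, iteration still visits the blotted slot, but with a null key,
// so loops over such a map must be prepared to skip null keys.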

//===----------------------------------------------------------------------===//
// ARC Utilities.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/StringSwitch.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Transforms/Utils/Local.h"

namespace {
  /// InstructionClass - A simple classification for instructions.
  enum InstructionClass {
    IC_Retain,              ///< objc_retain
    IC_RetainRV,            ///< objc_retainAutoreleasedReturnValue
    IC_RetainBlock,         ///< objc_retainBlock
    IC_Release,             ///< objc_release
    IC_Autorelease,         ///< objc_autorelease
    IC_AutoreleaseRV,       ///< objc_autoreleaseReturnValue
    IC_AutoreleasepoolPush, ///< objc_autoreleasePoolPush
    IC_AutoreleasepoolPop,  ///< objc_autoreleasePoolPop
    IC_NoopCast,            ///< objc_retainedObject, etc.
    IC_FusedRetainAutorelease, ///< objc_retainAutorelease
    IC_FusedRetainAutoreleaseRV, ///< objc_retainAutoreleaseReturnValue
    IC_LoadWeakRetained,    ///< objc_loadWeakRetained (primitive)
    IC_StoreWeak,           ///< objc_storeWeak (primitive)
    IC_InitWeak,            ///< objc_initWeak (derived)
    IC_LoadWeak,            ///< objc_loadWeak (derived)
    IC_MoveWeak,            ///< objc_moveWeak (derived)
    IC_CopyWeak,            ///< objc_copyWeak (derived)
    IC_DestroyWeak,         ///< objc_destroyWeak (derived)
    IC_StoreStrong,         ///< objc_storeStrong (derived)
    IC_CallOrUser,          ///< could call objc_release and/or "use" pointers
    IC_Call,                ///< could call objc_release
    IC_User,                ///< could "use" a pointer
    IC_None                 ///< anything else
  };
}

/// IsPotentialUse - Test whether the given value is possibly a
/// reference-counted pointer.
static bool IsPotentialUse(const Value *Op) {
  // Pointers to static or stack storage are not reference-counted pointers.
  if (isa<Constant>(Op) || isa<AllocaInst>(Op))
    return false;
  // Special arguments are not reference-counted.
  if (const Argument *Arg = dyn_cast<Argument>(Op))
    if (Arg->hasByValAttr() ||
        Arg->hasNestAttr() ||
        Arg->hasStructRetAttr())
      return false;
  // Only consider values with pointer types.
  // It seems intuitive to exclude function pointer types as well, since
  // functions are never reference-counted; however, clang occasionally
  // bitcasts reference-counted pointers to function-pointer type
  // temporarily.
  PointerType *Ty = dyn_cast<PointerType>(Op->getType());
  if (!Ty)
    return false;
  // Conservatively assume anything else is a potential use.
  return true;
}

/// GetCallSiteClass - Helper for GetInstructionClass. Determines what kind
/// of construct CS is.
static InstructionClass GetCallSiteClass(ImmutableCallSite CS) {
  for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I)
    if (IsPotentialUse(*I))
      return CS.onlyReadsMemory() ? IC_User : IC_CallOrUser;

  return CS.onlyReadsMemory() ? IC_None : IC_Call;
}
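
// Illustrative examples (hypothetical callees, not from this file):
//   call void @clobber(i8* %p)             ; may write memory: IC_CallOrUser
//   call i64 @inspect(i8* %p) readonly     ; only reads memory: IC_User
//   call double @norm(double %x) readonly  ; no interesting pointers: IC_None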

/// GetFunctionClass - Determine if F is one of the special known Functions.
/// If it isn't, return IC_CallOrUser.
static InstructionClass GetFunctionClass(const Function *F) {
  Function::const_arg_iterator AI = F->arg_begin(), AE = F->arg_end();

  // No arguments.
  if (AI == AE)
    return StringSwitch<InstructionClass>(F->getName())
      .Case("objc_autoreleasePoolPush", IC_AutoreleasepoolPush)
      .Default(IC_CallOrUser);

  // One argument.
  const Argument *A0 = AI++;
  if (AI == AE)
    // Argument is a pointer.
    if (PointerType *PTy = dyn_cast<PointerType>(A0->getType())) {
      Type *ETy = PTy->getElementType();
      // Argument is i8*.
      if (ETy->isIntegerTy(8))
        return StringSwitch<InstructionClass>(F->getName())
          .Case("objc_retain",                IC_Retain)
          .Case("objc_retainAutoreleasedReturnValue", IC_RetainRV)
          .Case("objc_retainBlock",           IC_RetainBlock)
          .Case("objc_release",               IC_Release)
          .Case("objc_autorelease",           IC_Autorelease)
          .Case("objc_autoreleaseReturnValue", IC_AutoreleaseRV)
          .Case("objc_autoreleasePoolPop",    IC_AutoreleasepoolPop)
          .Case("objc_retainedObject",        IC_NoopCast)
          .Case("objc_unretainedObject",      IC_NoopCast)
          .Case("objc_unretainedPointer",     IC_NoopCast)
          .Case("objc_retain_autorelease",    IC_FusedRetainAutorelease)
          .Case("objc_retainAutorelease",     IC_FusedRetainAutorelease)
          .Case("objc_retainAutoreleaseReturnValue",
                IC_FusedRetainAutoreleaseRV)
          .Default(IC_CallOrUser);

      // Argument is i8**.
      if (PointerType *Pte = dyn_cast<PointerType>(ETy))
        if (Pte->getElementType()->isIntegerTy(8))
          return StringSwitch<InstructionClass>(F->getName())
            .Case("objc_loadWeakRetained",    IC_LoadWeakRetained)
            .Case("objc_loadWeak",            IC_LoadWeak)
            .Case("objc_destroyWeak",         IC_DestroyWeak)
            .Default(IC_CallOrUser);
    }

  // Two arguments, first is i8**.
  const Argument *A1 = AI++;
  if (AI == AE)
    if (PointerType *PTy = dyn_cast<PointerType>(A0->getType()))
      if (PointerType *Pte = dyn_cast<PointerType>(PTy->getElementType()))
        if (Pte->getElementType()->isIntegerTy(8))
          if (PointerType *PTy1 = dyn_cast<PointerType>(A1->getType())) {
            Type *ETy1 = PTy1->getElementType();
            // Second argument is i8*.
            if (ETy1->isIntegerTy(8))
              return StringSwitch<InstructionClass>(F->getName())
                .Case("objc_storeWeak",       IC_StoreWeak)
                .Case("objc_initWeak",        IC_InitWeak)
                .Case("objc_storeStrong",     IC_StoreStrong)
                .Default(IC_CallOrUser);
            // Second argument is i8**.
            if (PointerType *Pte1 = dyn_cast<PointerType>(ETy1))
              if (Pte1->getElementType()->isIntegerTy(8))
                return StringSwitch<InstructionClass>(F->getName())
                  .Case("objc_moveWeak",      IC_MoveWeak)
                  .Case("objc_copyWeak",      IC_CopyWeak)
                  .Default(IC_CallOrUser);
          }

  // Anything else.
  return IC_CallOrUser;
}
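
// For example, a module-level declaration such as (illustrative)
//   declare i8* @objc_retain(i8*)
// matches the one-argument i8* form above and is classified as IC_Retain,
// while an unknown declaration like
//   declare void @foo(i8*)
// falls through to IC_CallOrUser.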

/// GetInstructionClass - Determine what kind of construct V is.
static InstructionClass GetInstructionClass(const Value *V) {
  if (const Instruction *I = dyn_cast<Instruction>(V)) {
    // Any instruction other than a bitcast or gep with a pointer operand has a
    // use of an objc pointer. Bitcasts, GEPs, Selects, PHIs transfer a pointer
    // to a subsequent use, rather than using it themselves, in this sense.
    // As a short cut, several other opcodes are known to have no pointer
    // operands of interest. And ret is never followed by a release, so it's
    // not interesting to examine.
    switch (I->getOpcode()) {
    case Instruction::Call: {
      const CallInst *CI = cast<CallInst>(I);
      // Check for calls to special functions.
      if (const Function *F = CI->getCalledFunction()) {
        InstructionClass Class = GetFunctionClass(F);
        if (Class != IC_CallOrUser)
          return Class;

        // None of the intrinsic functions do objc_release. For intrinsics, the
        // only question is whether or not they may be users.
        switch (F->getIntrinsicID()) {
        case Intrinsic::returnaddress: case Intrinsic::frameaddress:
        case Intrinsic::stacksave: case Intrinsic::stackrestore:
        case Intrinsic::vastart: case Intrinsic::vacopy: case Intrinsic::vaend:
        case Intrinsic::objectsize: case Intrinsic::prefetch:
        case Intrinsic::stackprotector:
        case Intrinsic::eh_return_i32: case Intrinsic::eh_return_i64:
        case Intrinsic::eh_typeid_for: case Intrinsic::eh_dwarf_cfa:
        case Intrinsic::eh_sjlj_lsda: case Intrinsic::eh_sjlj_functioncontext:
        case Intrinsic::init_trampoline: case Intrinsic::adjust_trampoline:
        case Intrinsic::lifetime_start: case Intrinsic::lifetime_end:
        case Intrinsic::invariant_start: case Intrinsic::invariant_end:
        // Don't let dbg info affect our results.
        case Intrinsic::dbg_declare: case Intrinsic::dbg_value:
          // Short cut: Some intrinsics obviously don't use ObjC pointers.
          return IC_None;
        default:
          break;
        }
      }
      return GetCallSiteClass(CI);
    }
    case Instruction::Invoke:
      return GetCallSiteClass(cast<InvokeInst>(I));
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::Select: case Instruction::PHI:
    case Instruction::Ret: case Instruction::Br:
    case Instruction::Switch: case Instruction::IndirectBr:
    case Instruction::Alloca: case Instruction::VAArg:
    case Instruction::Add: case Instruction::FAdd:
    case Instruction::Sub: case Instruction::FSub:
    case Instruction::Mul: case Instruction::FMul:
    case Instruction::SDiv: case Instruction::UDiv: case Instruction::FDiv:
    case Instruction::SRem: case Instruction::URem: case Instruction::FRem:
    case Instruction::Shl: case Instruction::LShr: case Instruction::AShr:
    case Instruction::And: case Instruction::Or: case Instruction::Xor:
    case Instruction::SExt: case Instruction::ZExt: case Instruction::Trunc:
    case Instruction::IntToPtr: case Instruction::FCmp:
    case Instruction::FPTrunc: case Instruction::FPExt:
    case Instruction::FPToUI: case Instruction::FPToSI:
    case Instruction::UIToFP: case Instruction::SIToFP:
    case Instruction::InsertElement: case Instruction::ExtractElement:
    case Instruction::ShuffleVector:
    case Instruction::ExtractValue:
      return IC_None;
    case Instruction::ICmp:
      // Comparing a pointer with null, or any other constant, isn't an
      // interesting use, because we don't care what the pointer points to, or
      // about the values of any other dynamic reference-counted pointers.
      if (IsPotentialUse(I->getOperand(1)))
        return IC_User;
      break;
    default:
      // For anything else, check all the operands.
      // Note that this includes both operands of a Store: while the first
      // operand isn't actually being dereferenced, it is being stored to
      // memory where we can no longer track who might read it and dereference
      // it, so we have to consider it potentially used.
      for (User::const_op_iterator OI = I->op_begin(), OE = I->op_end();
           OI != OE; ++OI)
        if (IsPotentialUse(*OI))
          return IC_User;
    }
  }

  // Otherwise, it's totally inert for ARC purposes.
  return IC_None;
}
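
// Illustrative classifications under the rules above (not from this file):
//   %c = icmp eq i8* %p, null    ; constant operand: classified IC_None
//   store i8* %p, i8** %q        ; the pointer itself is stored: IC_User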

/// GetBasicInstructionClass - Determine what kind of construct V is. This is
/// similar to GetInstructionClass except that it only detects objc runtime
/// calls. This allows it to be faster.
static InstructionClass GetBasicInstructionClass(const Value *V) {
  if (const CallInst *CI = dyn_cast<CallInst>(V)) {
    if (const Function *F = CI->getCalledFunction())
      return GetFunctionClass(F);
    // Otherwise, be conservative.
    return IC_CallOrUser;
  }

  // Otherwise, be conservative.
  return isa<InvokeInst>(V) ? IC_CallOrUser : IC_User;
}

/// IsRetain - Test if the given class is objc_retain or
/// equivalent.
static bool IsRetain(InstructionClass Class) {
  return Class == IC_Retain ||
         Class == IC_RetainRV;
}

/// IsAutorelease - Test if the given class is objc_autorelease or
/// equivalent.
static bool IsAutorelease(InstructionClass Class) {
  return Class == IC_Autorelease ||
         Class == IC_AutoreleaseRV;
}

/// IsForwarding - Test if the given class represents instructions which return
/// their argument verbatim.
static bool IsForwarding(InstructionClass Class) {
  // objc_retainBlock technically doesn't always return its argument
  // verbatim, but it doesn't matter for our purposes here.
  return Class == IC_Retain ||
         Class == IC_RetainRV ||
         Class == IC_Autorelease ||
         Class == IC_AutoreleaseRV ||
         Class == IC_RetainBlock ||
         Class == IC_NoopCast;
}

/// IsNoopOnNull - Test if the given class represents instructions which do
/// nothing if passed a null pointer.
static bool IsNoopOnNull(InstructionClass Class) {
  return Class == IC_Retain ||
         Class == IC_RetainRV ||
         Class == IC_Release ||
         Class == IC_Autorelease ||
         Class == IC_AutoreleaseRV ||
         Class == IC_RetainBlock;
}

/// IsAlwaysTail - Test if the given class represents instructions which are
/// always safe to mark with the "tail" keyword.
static bool IsAlwaysTail(InstructionClass Class) {
  // IC_RetainBlock may be given a stack argument.
  return Class == IC_Retain ||
         Class == IC_RetainRV ||
         Class == IC_Autorelease ||
         Class == IC_AutoreleaseRV;
}

/// IsNoThrow - Test if the given class represents instructions which are
/// always safe to mark with the nounwind attribute.
static bool IsNoThrow(InstructionClass Class) {
  // objc_retainBlock is not nounwind because it calls user copy constructors
  // which could theoretically throw.
  return Class == IC_Retain ||
         Class == IC_RetainRV ||
         Class == IC_Release ||
         Class == IC_Autorelease ||
         Class == IC_AutoreleaseRV ||
         Class == IC_AutoreleasepoolPush ||
         Class == IC_AutoreleasepoolPop;
}

/// EraseInstruction - Erase the given instruction. Many ObjC calls return their
/// argument verbatim, so if it's such a call and the return value has users,
/// replace them with the argument value.
static void EraseInstruction(Instruction *CI) {
  Value *OldArg = cast<CallInst>(CI)->getArgOperand(0);

  bool Unused = CI->use_empty();

  if (!Unused) {
    // Replace the return value with the argument.
    assert(IsForwarding(GetBasicInstructionClass(CI)) &&
           "Can't delete non-forwarding instruction with users!");
    CI->replaceAllUsesWith(OldArg);
  }

  CI->eraseFromParent();

  if (Unused)
    RecursivelyDeleteTriviallyDeadInstructions(OldArg);
}
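
// For example (illustrative IR): when erasing
//   %1 = call i8* @objc_retain(i8* %0)
// any remaining uses of %1 are first rewritten to use %0, which is safe
// because objc_retain returns its argument verbatim.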

/// GetUnderlyingObjCPtr - This is a wrapper around getUnderlyingObject which
/// also knows how to look through objc_retain and objc_autorelease calls, which
/// we know to return their argument verbatim.
static const Value *GetUnderlyingObjCPtr(const Value *V) {
  for (;;) {
    V = GetUnderlyingObject(V);
    if (!IsForwarding(GetBasicInstructionClass(V)))
      break;
    V = cast<CallInst>(V)->getArgOperand(0);
  }

  return V;
}

/// StripPointerCastsAndObjCCalls - This is a wrapper around
/// Value::stripPointerCasts which also knows how to look through objc_retain
/// and objc_autorelease calls, which we know to return their argument verbatim.
static const Value *StripPointerCastsAndObjCCalls(const Value *V) {
  for (;;) {
    V = V->stripPointerCasts();
    if (!IsForwarding(GetBasicInstructionClass(V)))
      break;
    V = cast<CallInst>(V)->getArgOperand(0);
  }
  return V;
}

/// StripPointerCastsAndObjCCalls - This is a wrapper around
/// Value::stripPointerCasts which also knows how to look through objc_retain
/// and objc_autorelease calls, which we know to return their argument verbatim.
static Value *StripPointerCastsAndObjCCalls(Value *V) {
  for (;;) {
    V = V->stripPointerCasts();
    if (!IsForwarding(GetBasicInstructionClass(V)))
      break;
    V = cast<CallInst>(V)->getArgOperand(0);
  }
  return V;
}

/// GetObjCArg - Assuming the given instruction is one of the special calls such
/// as objc_retain or objc_release, return the argument value, stripped of no-op
/// casts and forwarding calls.
static Value *GetObjCArg(Value *Inst) {
  return StripPointerCastsAndObjCCalls(cast<CallInst>(Inst)->getArgOperand(0));
}
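
// For example (illustrative IR), given
//   %1 = bitcast %struct.Obj* %x to i8*
//   call void @objc_release(i8* %1)
// GetObjCArg on the release call returns %x, the argument with the no-op
// bitcast stripped away.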

/// IsObjCIdentifiedObject - This is similar to AliasAnalysis's
/// isObjCIdentifiedObject, except that it uses special knowledge of
/// ObjC conventions...
static bool IsObjCIdentifiedObject(const Value *V) {
  // Assume that call results and arguments have their own "provenance".
  // Constants (including GlobalVariables) and Allocas are never
  // reference-counted.
  if (isa<CallInst>(V) || isa<InvokeInst>(V) ||
      isa<Argument>(V) || isa<Constant>(V) ||
      isa<AllocaInst>(V))
    return true;

  if (const LoadInst *LI = dyn_cast<LoadInst>(V)) {
    const Value *Pointer =
      StripPointerCastsAndObjCCalls(LI->getPointerOperand());
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Pointer)) {
      // A constant pointer can't be pointing to an object on the heap. It may
      // be reference-counted, but it won't be deleted.
      if (GV->isConstant())
        return true;
      StringRef Name = GV->getName();
      // These special variables are known to hold values which are not
      // reference-counted pointers.
      if (Name.startswith("\01L_OBJC_SELECTOR_REFERENCES_") ||
          Name.startswith("\01L_OBJC_CLASSLIST_REFERENCES_") ||
          Name.startswith("\01L_OBJC_CLASSLIST_SUP_REFS_$_") ||
          Name.startswith("\01L_OBJC_METH_VAR_NAME_") ||
          Name.startswith("\01l_objc_msgSend_fixup_"))
        return true;
    }
  }

  return false;
}

/// FindSingleUseIdentifiedObject - This is similar to
/// StripPointerCastsAndObjCCalls but it stops as soon as it finds a value
/// with multiple uses.
static const Value *FindSingleUseIdentifiedObject(const Value *Arg) {
  if (Arg->hasOneUse()) {
    if (const BitCastInst *BC = dyn_cast<BitCastInst>(Arg))
      return FindSingleUseIdentifiedObject(BC->getOperand(0));
    if (const GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Arg))
      if (GEP->hasAllZeroIndices())
        return FindSingleUseIdentifiedObject(GEP->getPointerOperand());
    if (IsForwarding(GetBasicInstructionClass(Arg)))
      return FindSingleUseIdentifiedObject(
               cast<CallInst>(Arg)->getArgOperand(0));
    if (!IsObjCIdentifiedObject(Arg))
      return 0;
    return Arg;
  }

  // If we found an identifiable object with multiple uses, but they are all
  // trivial uses, we can still consider this to be a single-use value.
  if (IsObjCIdentifiedObject(Arg)) {
    for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
         UI != UE; ++UI) {
      const User *U = *UI;
      if (!U->use_empty() || StripPointerCastsAndObjCCalls(U) != Arg)
        return 0;
    }

    return Arg;
  }

  return 0;
}

/// ModuleHasARC - Test if the given module looks interesting to run ARC
/// optimization on.
static bool ModuleHasARC(const Module &M) {
  return
    M.getNamedValue("objc_retain") ||
    M.getNamedValue("objc_release") ||
    M.getNamedValue("objc_autorelease") ||
    M.getNamedValue("objc_retainAutoreleasedReturnValue") ||
    M.getNamedValue("objc_retainBlock") ||
    M.getNamedValue("objc_autoreleaseReturnValue") ||
    M.getNamedValue("objc_autoreleasePoolPush") ||
    M.getNamedValue("objc_loadWeakRetained") ||
    M.getNamedValue("objc_loadWeak") ||
    M.getNamedValue("objc_destroyWeak") ||
    M.getNamedValue("objc_storeWeak") ||
    M.getNamedValue("objc_initWeak") ||
    M.getNamedValue("objc_moveWeak") ||
    M.getNamedValue("objc_copyWeak") ||
    M.getNamedValue("objc_retainedObject") ||
    M.getNamedValue("objc_unretainedObject") ||
    M.getNamedValue("objc_unretainedPointer");
}

/// DoesObjCBlockEscape - Test whether the given pointer, which is an
/// Objective-C block pointer, "escapes". This differs from regular
/// escape analysis in that a use as an argument to a call is not considered
/// an escape.
static bool DoesObjCBlockEscape(const Value *BlockPtr) {
  // Walk the def-use chains.
  SmallVector<const Value *, 4> Worklist;
  Worklist.push_back(BlockPtr);
  do {
    const Value *V = Worklist.pop_back_val();
    for (Value::const_use_iterator UI = V->use_begin(), UE = V->use_end();
         UI != UE; ++UI) {
      const User *UUser = *UI;
      // Special - Use by a call (callee or argument) is not considered
      // an escape.
      switch (GetBasicInstructionClass(UUser)) {
      case IC_StoreWeak:
      case IC_InitWeak:
      case IC_StoreStrong:
      case IC_Autorelease:
      case IC_AutoreleaseRV:
        // These special functions make copies of their pointer arguments.
        return true;
      case IC_User:
      case IC_None:
        // Use by an instruction which copies the value is an escape if the
        // result is an escape.
        if (isa<BitCastInst>(UUser) || isa<GetElementPtrInst>(UUser) ||
            isa<PHINode>(UUser) || isa<SelectInst>(UUser)) {
          Worklist.push_back(UUser);
          continue;
        }
        // Use by a load is not an escape.
        if (isa<LoadInst>(UUser))
          continue;
        // Use by a store is not an escape if the use is the address.
        if (const StoreInst *SI = dyn_cast<StoreInst>(UUser))
          if (V != SI->getValueOperand())
            continue;
        break;
      default:
        // Regular calls and other stuff are not considered escapes.
        continue;
      }
      // Otherwise, conservatively assume an escape.
      return true;
    }
  } while (!Worklist.empty());

  // No escapes found.
  return false;
}
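
// Illustrative applications of the policy above (not from this file):
//   store i8* %block, i8** %slot   ; the block is the stored value: escape
//   call void @use(i8* %block)     ; plain call argument: not an escape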

//===----------------------------------------------------------------------===//
// ARC AliasAnalysis.
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Passes.h"
#include "llvm/Pass.h"

namespace {
  /// ObjCARCAliasAnalysis - This is a simple alias analysis
  /// implementation that uses knowledge of ARC constructs to answer queries.
  ///
  /// TODO: This class could be generalized to know about other ObjC-specific
  /// tricks, such as knowing that ivars in the non-fragile ABI are non-aliasing
  /// even though their offsets are dynamic.
  class ObjCARCAliasAnalysis : public ImmutablePass,
                               public AliasAnalysis {
  public:
    static char ID; // Class identification, replacement for typeinfo
    ObjCARCAliasAnalysis() : ImmutablePass(ID) {
      initializeObjCARCAliasAnalysisPass(*PassRegistry::getPassRegistry());
    }

  private:
    virtual void initializePass() {
      InitializeAliasAnalysis(this);
    }

    /// getAdjustedAnalysisPointer - This method is used when a pass implements
    /// an analysis interface through multiple inheritance. If needed, it
    /// should override this to adjust the this pointer as needed for the
    /// specified pass info.
    virtual void *getAdjustedAnalysisPointer(const void *PI) {
      if (PI == &AliasAnalysis::ID)
        return static_cast<AliasAnalysis *>(this);
      return this;
    }

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual AliasResult alias(const Location &LocA, const Location &LocB);
    virtual bool pointsToConstantMemory(const Location &Loc, bool OrLocal);
    virtual ModRefBehavior getModRefBehavior(ImmutableCallSite CS);
    virtual ModRefBehavior getModRefBehavior(const Function *F);
    virtual ModRefResult getModRefInfo(ImmutableCallSite CS,
                                       const Location &Loc);
    virtual ModRefResult getModRefInfo(ImmutableCallSite CS1,
                                       ImmutableCallSite CS2);
  };
}  // End of anonymous namespace

// Register this pass...
char ObjCARCAliasAnalysis::ID = 0;
INITIALIZE_AG_PASS(ObjCARCAliasAnalysis, AliasAnalysis, "objc-arc-aa",
                   "ObjC-ARC-Based Alias Analysis", false, true, false)

ImmutablePass *llvm::createObjCARCAliasAnalysisPass() {
  return new ObjCARCAliasAnalysis();
}

void
ObjCARCAliasAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AliasAnalysis::getAnalysisUsage(AU);
}

AliasAnalysis::AliasResult
ObjCARCAliasAnalysis::alias(const Location &LocA, const Location &LocB) {
  if (!EnableARCOpts)
    return AliasAnalysis::alias(LocA, LocB);

  // First, strip off no-ops, including ObjC-specific no-ops, and try making a
  // precise alias query.
  const Value *SA = StripPointerCastsAndObjCCalls(LocA.Ptr);
  const Value *SB = StripPointerCastsAndObjCCalls(LocB.Ptr);
  AliasResult Result =
    AliasAnalysis::alias(Location(SA, LocA.Size, LocA.TBAATag),
                         Location(SB, LocB.Size, LocB.TBAATag));
  if (Result != MayAlias)
    return Result;

  // If that failed, climb to the underlying object, including climbing through
  // ObjC-specific no-ops, and try making an imprecise alias query.
  const Value *UA = GetUnderlyingObjCPtr(SA);
  const Value *UB = GetUnderlyingObjCPtr(SB);
  if (UA != SA || UB != SB) {
    Result = AliasAnalysis::alias(Location(UA), Location(UB));
    // We can't use MustAlias or PartialAlias results here because
    // GetUnderlyingObjCPtr may return an offsetted pointer value.
    if (Result == NoAlias)
      return NoAlias;
  }

  // If that failed, fail. We don't need to chain here, since that's covered
  // by the earlier precise query.
  return MayAlias;
}
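
// For example (illustrative IR), with
//   %1 = call i8* @objc_retain(i8* %p)
// a query about %1 and %p is answered by first stripping the forwarding
// retain call, so both sides reduce to %p and the underlying analysis can
// report MustAlias.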

bool
ObjCARCAliasAnalysis::pointsToConstantMemory(const Location &Loc,
                                             bool OrLocal) {
  if (!EnableARCOpts)
    return AliasAnalysis::pointsToConstantMemory(Loc, OrLocal);

  // First, strip off no-ops, including ObjC-specific no-ops, and try making
  // a precise alias query.
  const Value *S = StripPointerCastsAndObjCCalls(Loc.Ptr);
  if (AliasAnalysis::pointsToConstantMemory(Location(S, Loc.Size, Loc.TBAATag),
                                            OrLocal))
    return true;

  // If that failed, climb to the underlying object, including climbing through
  // ObjC-specific no-ops, and try making an imprecise alias query.
  const Value *U = GetUnderlyingObjCPtr(S);
  if (U != S)
    return AliasAnalysis::pointsToConstantMemory(Location(U), OrLocal);

  // If that failed, fail. We don't need to chain here, since that's covered
  // by the earlier precise query.
  return false;
}

AliasAnalysis::ModRefBehavior
ObjCARCAliasAnalysis::getModRefBehavior(ImmutableCallSite CS) {
  // We have nothing to do. Just chain to the next AliasAnalysis.
  return AliasAnalysis::getModRefBehavior(CS);
}

AliasAnalysis::ModRefBehavior
ObjCARCAliasAnalysis::getModRefBehavior(const Function *F) {
  if (!EnableARCOpts)
    return AliasAnalysis::getModRefBehavior(F);

  switch (GetFunctionClass(F)) {
  case IC_NoopCast:
    return DoesNotAccessMemory;
  default:
    break;
  }

  return AliasAnalysis::getModRefBehavior(F);
}

AliasAnalysis::ModRefResult
ObjCARCAliasAnalysis::getModRefInfo(ImmutableCallSite CS, const Location &Loc) {
  if (!EnableARCOpts)
    return AliasAnalysis::getModRefInfo(CS, Loc);

  switch (GetBasicInstructionClass(CS.getInstruction())) {
  case IC_Retain:
  case IC_RetainRV:
  case IC_Autorelease:
  case IC_AutoreleaseRV:
  case IC_NoopCast:
  case IC_AutoreleasepoolPush:
  case IC_FusedRetainAutorelease:
  case IC_FusedRetainAutoreleaseRV:
    // These functions don't access any memory visible to the compiler.
    // Note that this doesn't include objc_retainBlock, because it updates
    // pointers when it copies block data.
    return NoModRef;
  default:
    break;
  }

  return AliasAnalysis::getModRefInfo(CS, Loc);
}

AliasAnalysis::ModRefResult
ObjCARCAliasAnalysis::getModRefInfo(ImmutableCallSite CS1,
                                    ImmutableCallSite CS2) {
  // TODO: Theoretically we could check for dependencies between objc_* calls
  // and OnlyAccessesArgumentPointees calls or other well-behaved calls.
  return AliasAnalysis::getModRefInfo(CS1, CS2);
}

//===----------------------------------------------------------------------===//
// ARC expansion.
//===----------------------------------------------------------------------===//

#include "llvm/Support/InstIterator.h"
#include "llvm/Transforms/Scalar.h"

namespace {
  /// ObjCARCExpand - Early ARC transformations.
  class ObjCARCExpand : public FunctionPass {
    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual bool doInitialization(Module &M);
    virtual bool runOnFunction(Function &F);

    /// Run - A flag indicating whether this optimization pass should run.
    bool Run;

  public:
    static char ID;
    ObjCARCExpand() : FunctionPass(ID) {
      initializeObjCARCExpandPass(*PassRegistry::getPassRegistry());
    }
  };
}

char ObjCARCExpand::ID = 0;
INITIALIZE_PASS(ObjCARCExpand,
                "objc-arc-expand", "ObjC ARC expansion", false, false)

Pass *llvm::createObjCARCExpandPass() {
  return new ObjCARCExpand();
}

void ObjCARCExpand::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
}

bool ObjCARCExpand::doInitialization(Module &M) {
  Run = ModuleHasARC(M);
  return false;
}

bool ObjCARCExpand::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  bool Changed = false;

  for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ++I) {
    Instruction *Inst = &*I;

    DEBUG(dbgs() << "ObjCARCExpand: Visiting: " << *Inst << "\n");

    switch (GetBasicInstructionClass(Inst)) {
    case IC_Retain:
    case IC_RetainRV:
    case IC_Autorelease:
    case IC_AutoreleaseRV:
    case IC_FusedRetainAutorelease:
    case IC_FusedRetainAutoreleaseRV: {
      // These calls return their argument verbatim, as a low-level
      // optimization. However, this makes high-level optimizations
      // harder. Undo any uses of this optimization that the front-end
      // emitted here. We'll redo them in the contract pass.
      Value *Value = cast<CallInst>(Inst)->getArgOperand(0);
      DEBUG(dbgs() << "ObjCARCExpand: Old = " << *Inst << "\n"
                      "               New = " << *Value << "\n");
      Inst->replaceAllUsesWith(Value);
      Changed = true;
      break;
    }
    default:
      break;
    }
  }

  DEBUG(dbgs() << "ObjCARCExpand: Finished List.\n\n");

  return Changed;
}
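
// For example (illustrative IR), given front-end output such as
//   %1 = call i8* @objc_retain(i8* %0)
//   call void @use(i8* %1)
// this pass rewrites the use of %1 into a use of %0. The calls themselves
// are left in place; the contract pass re-introduces such forwarding later.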

//===----------------------------------------------------------------------===//
// ARC autorelease pool elimination.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"

namespace {
  /// ObjCARCAPElim - Autorelease pool elimination.
  class ObjCARCAPElim : public ModulePass {
    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual bool runOnModule(Module &M);

    static bool MayAutorelease(ImmutableCallSite CS, unsigned Depth = 0);
    static bool OptimizeBB(BasicBlock *BB);

  public:
    static char ID;
    ObjCARCAPElim() : ModulePass(ID) {
      initializeObjCARCAPElimPass(*PassRegistry::getPassRegistry());
    }
  };
}

char ObjCARCAPElim::ID = 0;
INITIALIZE_PASS(ObjCARCAPElim,
                "objc-arc-apelim",
                "ObjC ARC autorelease pool elimination",
                false, false)

Pass *llvm::createObjCARCAPElimPass() {
  return new ObjCARCAPElim();
}

void ObjCARCAPElim::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesCFG();
}

/// MayAutorelease - Interprocedurally determine if calls made by the
/// given call site can possibly produce autoreleases.
bool ObjCARCAPElim::MayAutorelease(ImmutableCallSite CS, unsigned Depth) {
  if (const Function *Callee = CS.getCalledFunction()) {
    if (Callee->isDeclaration() || Callee->mayBeOverridden())
      return true;
    for (Function::const_iterator I = Callee->begin(), E = Callee->end();
         I != E; ++I) {
      const BasicBlock *BB = I;
      for (BasicBlock::const_iterator J = BB->begin(), F = BB->end();
           J != F; ++J)
        if (ImmutableCallSite JCS = ImmutableCallSite(J))
          // This recursion depth limit is arbitrary. It's just large
          // enough to cover known interesting testcases.
          if (Depth < 3 &&
              !JCS.onlyReadsMemory() &&
              MayAutorelease(JCS, Depth + 1))
            return true;
    }

    return false;
  }

  return true;
}
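
// OptimizeBB below looks for push/pop pairs like this (illustrative IR):
//   %pool = call i8* @objc_autoreleasePoolPush()
//   ...calls for which MayAutorelease returns false...
//   call void @objc_autoreleasePoolPop(i8* %pool)
// and deletes both calls, since an empty pool is pushed and then popped.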

bool ObjCARCAPElim::OptimizeBB(BasicBlock *BB) {
  bool Changed = false;

  Instruction *Push = 0;
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ) {
    Instruction *Inst = I++;
    switch (GetBasicInstructionClass(Inst)) {
    case IC_AutoreleasepoolPush:
      Push = Inst;
      break;
    case IC_AutoreleasepoolPop:
      // If this pop matches a push and nothing in between can autorelease,
      // zap the pair.
      if (Push && cast<CallInst>(Inst)->getArgOperand(0) == Push) {
        Changed = true;
        DEBUG(dbgs() << "ObjCARCAPElim::OptimizeBB: Zapping push pop autorelease pair:\n"
                     << "                        Pop: " << *Inst << "\n"
                     << "                       Push: " << *Push << "\n");
        Inst->eraseFromParent();
        Push->eraseFromParent();
      }
      Push = 0;
      break;
    case IC_CallOrUser:
      if (MayAutorelease(ImmutableCallSite(Inst)))
        Push = 0;
      break;
    default:
      break;
    }
  }

  return Changed;
}

bool ObjCARCAPElim::runOnModule(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!ModuleHasARC(M))
    return false;

  // Find the llvm.global_ctors variable, as the first step in
  // identifying the global constructors. In theory, unnecessary autorelease
  // pools could occur anywhere, but in practice it's pretty rare. Global
  // ctors are a place where autorelease pools get inserted automatically,
  // so it's pretty common for them to be unnecessary, and it's pretty
  // profitable to eliminate them.
  GlobalVariable *GV = M.getGlobalVariable("llvm.global_ctors");
  if (!GV)
    return false;

  assert(GV->hasDefinitiveInitializer() &&
         "llvm.global_ctors is uncooperative!");

  bool Changed = false;

  // Dig the constructor functions out of GV's initializer.
  ConstantArray *Init = cast<ConstantArray>(GV->getInitializer());
  for (User::op_iterator OI = Init->op_begin(), OE = Init->op_end();
       OI != OE; ++OI) {
    Value *Op = *OI;
    // llvm.global_ctors is an array of pairs where the second members
    // are constructor functions.
    Function *F = dyn_cast<Function>(cast<ConstantStruct>(Op)->getOperand(1));
    // If the user used a constructor function with the wrong signature and
    // it got bitcasted or whatever, look the other way.
    if (!F)
      continue;
    // Only look at function definitions.
    if (F->isDeclaration())
      continue;
    // Only look at functions with one basic block.
    if (llvm::next(F->begin()) != F->end())
      continue;
    // Ok, a single-block constructor function definition. Try to optimize it.
    Changed |= OptimizeBB(F->begin());
  }

  return Changed;
}

//===----------------------------------------------------------------------===//
// ARC optimization.
//===----------------------------------------------------------------------===//

// TODO: On code like this:
//
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
// stuff_that_cannot_release()
// objc_retain(%x)
// stuff_that_cannot_release()
// objc_autorelease(%x)
//
// The second retain and autorelease can be deleted.
//
// TODO: It should be possible to delete
// objc_autoreleasePoolPush and objc_autoreleasePoolPop
// pairs if nothing is actually autoreleased between them. Also, autorelease
// calls followed by objc_autoreleasePoolPop calls (perhaps in ObjC++ code
// after inlining) can be turned into plain release calls.
//
// TODO: Critical-edge splitting. If the optimal insertion point is
// a critical edge, the current algorithm has to fail, because it doesn't
// know how to split edges. It should be possible to make the optimizer
// think in terms of edges, rather than blocks, and then split critical
// edges on demand.
//
// TODO: OptimizeSequences could be generalized to be Interprocedural.
//
// TODO: Recognize that a bunch of other objc runtime calls have
// non-escaping arguments and non-releasing arguments, and may be
// non-autoreleasing.
//
// TODO: Sink autorelease calls as far as possible. Unfortunately we
// usually can't sink them past other calls, which would be the main
// case where it would be useful.
//
// TODO: The pointer returned from objc_loadWeakRetained is retained.
//
// TODO: Delete release+retain pairs (rare).

#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CFG.h"

STATISTIC(NumNoops,       "Number of no-op objc calls eliminated");
STATISTIC(NumPartialNoops, "Number of partially no-op objc calls eliminated");
STATISTIC(NumAutoreleases,"Number of autoreleases converted to releases");
STATISTIC(NumRets,        "Number of return value forwarding "
                          "retain+autoreleases eliminated");
STATISTIC(NumRRs,         "Number of retain+release paths eliminated");
STATISTIC(NumPeeps,       "Number of calls peephole-optimized");

namespace {
  /// ProvenanceAnalysis - This is similar to BasicAliasAnalysis, and it
  /// uses many of the same techniques, except it uses special ObjC-specific
  /// reasoning about pointer relationships.
  class ProvenanceAnalysis {
    AliasAnalysis *AA;

    typedef std::pair<const Value *, const Value *> ValuePairTy;
    typedef DenseMap<ValuePairTy, bool> CachedResultsTy;
    CachedResultsTy CachedResults;

    bool relatedCheck(const Value *A, const Value *B);
    bool relatedSelect(const SelectInst *A, const Value *B);
    bool relatedPHI(const PHINode *A, const Value *B);

    void operator=(const ProvenanceAnalysis &) LLVM_DELETED_FUNCTION;
    ProvenanceAnalysis(const ProvenanceAnalysis &) LLVM_DELETED_FUNCTION;

  public:
    ProvenanceAnalysis() {}

    void setAA(AliasAnalysis *aa) { AA = aa; }

    AliasAnalysis *getAA() const { return AA; }

    bool related(const Value *A, const Value *B);

    void clear() {
      CachedResults.clear();
    }
  };
}

bool ProvenanceAnalysis::relatedSelect(const SelectInst *A, const Value *B) {
  // If the values are Selects with the same condition, we can do a more precise
  // check: just check for relations between the values on corresponding arms.
  if (const SelectInst *SB = dyn_cast<SelectInst>(B))
    if (A->getCondition() == SB->getCondition())
      return related(A->getTrueValue(), SB->getTrueValue()) ||
             related(A->getFalseValue(), SB->getFalseValue());

  // Check both arms of the Select node individually.
  return related(A->getTrueValue(), B) ||
         related(A->getFalseValue(), B);
}

bool ProvenanceAnalysis::relatedPHI(const PHINode *A, const Value *B) {
  // If the values are PHIs in the same block, we can do a more precise as well
  // as efficient check: just check for relations between the values on
  // corresponding edges.
  if (const PHINode *PNB = dyn_cast<PHINode>(B))
    if (PNB->getParent() == A->getParent()) {
      for (unsigned i = 0, e = A->getNumIncomingValues(); i != e; ++i)
        if (related(A->getIncomingValue(i),
                    PNB->getIncomingValueForBlock(A->getIncomingBlock(i))))
          return true;
      return false;
    }

  // Check each unique source of the PHI node against B.
  SmallPtrSet<const Value *, 4> UniqueSrc;
  for (unsigned i = 0, e = A->getNumIncomingValues(); i != e; ++i) {
    const Value *PV1 = A->getIncomingValue(i);
    if (UniqueSrc.insert(PV1) && related(PV1, B))
      return true;
  }

  // All of the arms checked out.
  return false;
}

/// isStoredObjCPointer - Test if the value of P, or any value covered by its
/// provenance, is ever stored within the function (not counting callees).
static bool isStoredObjCPointer(const Value *P) {
  SmallPtrSet<const Value *, 8> Visited;
  SmallVector<const Value *, 8> Worklist;
  Worklist.push_back(P);
  Visited.insert(P);
  do {
    P = Worklist.pop_back_val();
    for (Value::const_use_iterator UI = P->use_begin(), UE = P->use_end();
         UI != UE; ++UI) {
      const User *Ur = *UI;
      if (isa<StoreInst>(Ur)) {
        if (UI.getOperandNo() == 0)
          // The pointer is stored.
          return true;
        // The pointer is only stored through; P itself is not stored.
        continue;
      }
      if (isa<CallInst>(Ur))
        // The pointer is passed as an argument, ignore this.
        continue;
      if (isa<PtrToIntInst>(P))
        // Assume the worst.
        return true;
      if (Visited.insert(Ur))
        Worklist.push_back(Ur);
    }
  } while (!Worklist.empty());

  // Everything checked out.
  return false;
}

bool ProvenanceAnalysis::relatedCheck(const Value *A, const Value *B) {
  // Skip past provenance pass-throughs.
  A = GetUnderlyingObjCPtr(A);
  B = GetUnderlyingObjCPtr(B);

  // Quick check.
  if (A == B)
    return true;

  // Ask regular AliasAnalysis, for a first approximation.
  switch (AA->alias(A, B)) {
  case AliasAnalysis::NoAlias:
    return false;
  case AliasAnalysis::MustAlias:
  case AliasAnalysis::PartialAlias:
    return true;
  case AliasAnalysis::MayAlias:
    break;
  }

  bool AIsIdentified = IsObjCIdentifiedObject(A);
  bool BIsIdentified = IsObjCIdentifiedObject(B);

  // An ObjC-Identified object can't alias a load if it is never locally stored.
  if (AIsIdentified) {
    // Check for an obvious escape.
    if (isa<LoadInst>(B))
      return isStoredObjCPointer(A);
    if (BIsIdentified) {
      // Check for an obvious escape.
      if (isa<LoadInst>(A))
        return isStoredObjCPointer(B);
      // Both pointers are identified and escapes aren't an evident problem.
      return false;
    }
  } else if (BIsIdentified) {
    // Check for an obvious escape.
    if (isa<LoadInst>(A))
      return isStoredObjCPointer(B);
  }

  // Special handling for PHI and Select.
  if (const PHINode *PN = dyn_cast<PHINode>(A))
    return relatedPHI(PN, B);
  if (const PHINode *PN = dyn_cast<PHINode>(B))
    return relatedPHI(PN, A);
  if (const SelectInst *S = dyn_cast<SelectInst>(A))
    return relatedSelect(S, B);
  if (const SelectInst *S = dyn_cast<SelectInst>(B))
    return relatedSelect(S, A);

  // Conservative.
  return true;
}

bool ProvenanceAnalysis::related(const Value *A, const Value *B) {
  // Begin by inserting a conservative value into the map. If the insertion
  // fails, we have the answer already. If it succeeds, leave it there until we
  // compute the real answer to guard against recursive queries.
  if (A > B) std::swap(A, B);
  std::pair<CachedResultsTy::iterator, bool> Pair =
    CachedResults.insert(std::make_pair(ValuePairTy(A, B), true));
  if (!Pair.second)
    return Pair.first->second;

  bool Result = relatedCheck(A, B);
  CachedResults[ValuePairTy(A, B)] = Result;
  return Result;
}

namespace {
  // Sequence - A sequence of states that a pointer may go through in which an
  // objc_retain and objc_release are actually needed.
  enum Sequence {
    S_None,
    S_Retain,         ///< objc_retain(x)
    S_CanRelease,     ///< foo(x) -- x could possibly see a ref count decrement
    S_Use,            ///< any use of x
    S_Stop,           ///< like S_Release, but code motion is stopped
    S_Release,        ///< objc_release(x)
    S_MovableRelease  ///< objc_release(x), !clang.imprecise_release
  };
}

static Sequence MergeSeqs(Sequence A, Sequence B, bool TopDown) {
  // The easy cases.
  if (A == B)
    return A;
  if (A == S_None || B == S_None)
    return S_None;

  if (A > B) std::swap(A, B);
  if (TopDown) {
    // Choose the side which is further along in the sequence.
    if ((A == S_Retain || A == S_CanRelease) &&
        (B == S_CanRelease || B == S_Use))
      return B;
  } else {
    // Choose the side which is further along in the sequence.
    if ((A == S_Use || A == S_CanRelease) &&
        (B == S_Use || B == S_Release || B == S_Stop || B == S_MovableRelease))
      return A;
    // If both sides are releases, choose the more conservative one.
    if (A == S_Stop && (B == S_Release || B == S_MovableRelease))
      return A;
    if (A == S_Release && B == S_MovableRelease)
      return A;
  }

  return S_None;
}
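
// For example, merging S_Retain with S_CanRelease in the top-down direction
// yields S_CanRelease (the state further along the sequence), while merging
// S_Stop with S_MovableRelease bottom-up yields S_Stop, the more
// conservative of the two release states.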

namespace {
  /// RRInfo - Unidirectional information about either a
  /// retain-decrement-use-release sequence or release-use-decrement-retain
  /// reverse sequence.
  struct RRInfo {
    /// KnownSafe - After an objc_retain, the reference count of the referenced
    /// object is known to be positive. Similarly, before an objc_release, the
    /// reference count of the referenced object is known to be positive. If
    /// there are retain-release pairs in code regions where the retain count
    /// is known to be positive, they can be eliminated, regardless of any side
    /// effects between them.
    ///
    /// Also, a retain+release pair nested within another retain+release
    /// pair all on the known same pointer value can be eliminated, regardless
    /// of any intervening side effects.
    ///
    /// KnownSafe is true when either of these conditions is satisfied.
    bool KnownSafe;

    /// IsRetainBlock - True if the Calls are objc_retainBlock calls (as
    /// opposed to objc_retain calls).
    bool IsRetainBlock;

    /// IsTailCallRelease - True if the objc_release calls are all marked
    /// with the "tail" keyword.
    bool IsTailCallRelease;

    /// ReleaseMetadata - If the Calls are objc_release calls and they all have
    /// a clang.imprecise_release tag, this is the metadata tag.
    MDNode *ReleaseMetadata;

    /// Calls - For a top-down sequence, the set of objc_retains or
    /// objc_retainBlocks. For bottom-up, the set of objc_releases.
    SmallPtrSet<Instruction *, 2> Calls;

    /// ReverseInsertPts - The set of optimal insert positions for
    /// moving calls in the opposite sequence.
    SmallPtrSet<Instruction *, 2> ReverseInsertPts;

    RRInfo() :
      KnownSafe(false), IsRetainBlock(false),
      IsTailCallRelease(false),
      ReleaseMetadata(0) {}

    void clear();
  };
}

void RRInfo::clear() {
  KnownSafe = false;
  IsRetainBlock = false;
  IsTailCallRelease = false;
  ReleaseMetadata = 0;
  Calls.clear();
  ReverseInsertPts.clear();
}

namespace {
  /// PtrState - This class summarizes several per-pointer runtime properties
  /// which are propagated through the flow graph.
  class PtrState {
    /// KnownPositiveRefCount - True if the reference count is known to
    /// be incremented.
    bool KnownPositiveRefCount;

    /// Partial - True if we've seen an opportunity for partial RR elimination,
    /// such as pushing calls into a CFG triangle or into one side of a
    /// CFG diamond.
    bool Partial;

    /// Seq - The current position in the sequence.
    Sequence Seq : 8;

  public:
    /// RRI - Unidirectional information about the current sequence.
    /// TODO: Encapsulate this better.
    RRInfo RRI;

    PtrState() : KnownPositiveRefCount(false), Partial(false),
                 Seq(S_None) {}

    void SetKnownPositiveRefCount() {
      KnownPositiveRefCount = true;
    }

    void ClearRefCount() {
      KnownPositiveRefCount = false;
    }

    bool IsKnownIncremented() const {
      return KnownPositiveRefCount;
    }

    void SetSeq(Sequence NewSeq) {
      Seq = NewSeq;
    }

    Sequence GetSeq() const {
      return Seq;
    }

    void ClearSequenceProgress() {
      ResetSequenceProgress(S_None);
    }

    void ResetSequenceProgress(Sequence NewSeq) {
      Seq = NewSeq;
      Partial = false;
      RRI.clear();
    }

    void Merge(const PtrState &Other, bool TopDown);
  };
}

void
PtrState::Merge(const PtrState &Other, bool TopDown) {
  Seq = MergeSeqs(Seq, Other.Seq, TopDown);
  KnownPositiveRefCount = KnownPositiveRefCount && Other.KnownPositiveRefCount;

  // We can't merge a plain objc_retain with an objc_retainBlock.
  if (RRI.IsRetainBlock != Other.RRI.IsRetainBlock)
    Seq = S_None;

  // If we're not in a sequence (anymore), drop all associated state.
  if (Seq == S_None) {
    Partial = false;
    RRI.clear();
  } else if (Partial || Other.Partial) {
    // If we're doing a merge on a path that's previously seen a partial
    // merge, conservatively drop the sequence, to avoid doing partial
    // RR elimination. If the branch predicates for the two merges differ,
    // mixing them is unsafe.
    ClearSequenceProgress();
  } else {
    // Conservatively merge the ReleaseMetadata information.
    if (RRI.ReleaseMetadata != Other.RRI.ReleaseMetadata)
      RRI.ReleaseMetadata = 0;

    RRI.KnownSafe = RRI.KnownSafe && Other.RRI.KnownSafe;
    RRI.IsTailCallRelease = RRI.IsTailCallRelease &&
                            Other.RRI.IsTailCallRelease;
    RRI.Calls.insert(Other.RRI.Calls.begin(), Other.RRI.Calls.end());

    // Merge the insert point sets. If there are any differences,
    // that makes this a partial merge.
    Partial = RRI.ReverseInsertPts.size() != Other.RRI.ReverseInsertPts.size();
    for (SmallPtrSet<Instruction *, 2>::const_iterator
         I = Other.RRI.ReverseInsertPts.begin(),
         E = Other.RRI.ReverseInsertPts.end(); I != E; ++I)
      Partial |= RRI.ReverseInsertPts.insert(*I);
  }
}

namespace {
  /// BBState - Per-BasicBlock state.
  class BBState {
    /// TopDownPathCount - The number of unique control paths from the entry
    /// which can reach this block.
    unsigned TopDownPathCount;

    /// BottomUpPathCount - The number of unique control paths to exits
    /// from this block.
    unsigned BottomUpPathCount;

    /// MapTy - A type for PerPtrTopDown and PerPtrBottomUp.
    typedef MapVector<const Value *, PtrState> MapTy;

    /// PerPtrTopDown - The top-down traversal uses this to record information
    /// known about a pointer at the bottom of each block.
    MapTy PerPtrTopDown;

    /// PerPtrBottomUp - The bottom-up traversal uses this to record information
    /// known about a pointer at the top of each block.
    MapTy PerPtrBottomUp;

    /// Preds, Succs - Effective predecessors and successors of the current
    /// block (this ignores ignorable edges and ignored backedges).
    SmallVector<BasicBlock *, 2> Preds;
    SmallVector<BasicBlock *, 2> Succs;

  public:
    BBState() : TopDownPathCount(0), BottomUpPathCount(0) {}

    typedef MapTy::iterator ptr_iterator;
    typedef MapTy::const_iterator ptr_const_iterator;

    ptr_iterator top_down_ptr_begin() { return PerPtrTopDown.begin(); }
    ptr_iterator top_down_ptr_end() { return PerPtrTopDown.end(); }
    ptr_const_iterator top_down_ptr_begin() const {
      return PerPtrTopDown.begin();
    }
    ptr_const_iterator top_down_ptr_end() const {
      return PerPtrTopDown.end();
    }

    ptr_iterator bottom_up_ptr_begin() { return PerPtrBottomUp.begin(); }
    ptr_iterator bottom_up_ptr_end() { return PerPtrBottomUp.end(); }
    ptr_const_iterator bottom_up_ptr_begin() const {
      return PerPtrBottomUp.begin();
    }
    ptr_const_iterator bottom_up_ptr_end() const {
      return PerPtrBottomUp.end();
    }

    /// SetAsEntry - Mark this block as being an entry block, which has one
    /// path from the entry by definition.
    void SetAsEntry() { TopDownPathCount = 1; }

    /// SetAsExit - Mark this block as being an exit block, which has one
    /// path to an exit by definition.
    void SetAsExit() { BottomUpPathCount = 1; }

    PtrState &getPtrTopDownState(const Value *Arg) {
      return PerPtrTopDown[Arg];
    }

    PtrState &getPtrBottomUpState(const Value *Arg) {
      return PerPtrBottomUp[Arg];
    }

    void clearBottomUpPointers() {
      PerPtrBottomUp.clear();
    }

    void clearTopDownPointers() {
      PerPtrTopDown.clear();
    }

    void InitFromPred(const BBState &Other);
    void InitFromSucc(const BBState &Other);
    void MergePred(const BBState &Other);
    void MergeSucc(const BBState &Other);

    /// GetAllPathCount - Return the number of possible unique paths from an
    /// entry to an exit which pass through this block. This is only valid
    /// after both the top-down and bottom-up traversals are complete.
    unsigned GetAllPathCount() const {
      assert(TopDownPathCount != 0);
      assert(BottomUpPathCount != 0);
      return TopDownPathCount * BottomUpPathCount;
    }

    // Specialized CFG utilities.
    typedef SmallVectorImpl<BasicBlock *>::const_iterator edge_iterator;
    edge_iterator pred_begin() { return Preds.begin(); }
    edge_iterator pred_end() { return Preds.end(); }
    edge_iterator succ_begin() { return Succs.begin(); }
    edge_iterator succ_end() { return Succs.end(); }

    void addSucc(BasicBlock *Succ) { Succs.push_back(Succ); }
    void addPred(BasicBlock *Pred) { Preds.push_back(Pred); }

    bool isExit() const { return Succs.empty(); }
  };
}
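
// For example, a block reachable along 2 distinct paths from the entry and
// with 3 distinct paths to exits lies on 2 * 3 = 6 unique entry-to-exit
// paths, which is what GetAllPathCount reports for it.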

void BBState::InitFromPred(const BBState &Other) {
  PerPtrTopDown = Other.PerPtrTopDown;
  TopDownPathCount = Other.TopDownPathCount;
}

void BBState::InitFromSucc(const BBState &Other) {
  PerPtrBottomUp = Other.PerPtrBottomUp;
  BottomUpPathCount = Other.BottomUpPathCount;
}

/// MergePred - The top-down traversal uses this to merge information about
/// predecessors to form the initial state for a new block.
void BBState::MergePred(const BBState &Other) {
  // Other.TopDownPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  TopDownPathCount += Other.TopDownPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (TopDownPathCount < Other.TopDownPathCount) {
    clearTopDownPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the same key,
  // merge the entries. Otherwise, copy the entry and merge it with an empty
  // entry.
  for (ptr_const_iterator MI = Other.top_down_ptr_begin(),
       ME = Other.top_down_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrTopDown.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/true);
  }

  // For each entry in our set, if the other set doesn't have an entry with the
  // same key, force it to merge with an empty entry.
  for (ptr_iterator MI = top_down_ptr_begin(),
       ME = top_down_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrTopDown.find(MI->first) == Other.PerPtrTopDown.end())
      MI->second.Merge(PtrState(), /*TopDown=*/true);
}

/// MergeSucc - The bottom-up traversal uses this to merge information about
/// successors to form the initial state for a new block.
void BBState::MergeSucc(const BBState &Other) {
  // Other.BottomUpPathCount can be 0, in which case it is either dead or a
  // loop backedge. Loop backedges are special.
  BottomUpPathCount += Other.BottomUpPathCount;

  // Check for overflow. If we have overflow, fall back to conservative
  // behavior.
  if (BottomUpPathCount < Other.BottomUpPathCount) {
    clearBottomUpPointers();
    return;
  }

  // For each entry in the other set, if our set has an entry with the
  // same key, merge the entries. Otherwise, copy the entry and merge
  // it with an empty entry.
  for (ptr_const_iterator MI = Other.bottom_up_ptr_begin(),
       ME = Other.bottom_up_ptr_end(); MI != ME; ++MI) {
    std::pair<ptr_iterator, bool> Pair = PerPtrBottomUp.insert(*MI);
    Pair.first->second.Merge(Pair.second ? PtrState() : MI->second,
                             /*TopDown=*/false);
  }

  // For each entry in our set, if the other set doesn't have an entry
  // with the same key, force it to merge with an empty entry.
  for (ptr_iterator MI = bottom_up_ptr_begin(),
       ME = bottom_up_ptr_end(); MI != ME; ++MI)
    if (Other.PerPtrBottomUp.find(MI->first) == Other.PerPtrBottomUp.end())
      MI->second.Merge(PtrState(), /*TopDown=*/false);
}

namespace {
  /// ObjCARCOpt - The main ARC optimization pass.
  class ObjCARCOpt : public FunctionPass {
    bool Changed;
    ProvenanceAnalysis PA;

    /// Run - A flag indicating whether this optimization pass should run.
    bool Run;

    /// RetainRVCallee, etc. - Declarations for ObjC runtime
    /// functions, for use in creating calls to them. These are initialized
    /// lazily to avoid cluttering up the Module with unused declarations.
    Constant *RetainRVCallee, *AutoreleaseRVCallee, *ReleaseCallee,
             *RetainCallee, *RetainBlockCallee, *AutoreleaseCallee;

    /// UsedInThisFunction - Flags which determine whether each of the
    /// interesting runtime functions is in fact used in the current function.
    unsigned UsedInThisFunction;

    /// ImpreciseReleaseMDKind - The Metadata Kind for clang.imprecise_release
    /// metadata.
    unsigned ImpreciseReleaseMDKind;

    /// CopyOnEscapeMDKind - The Metadata Kind for clang.arc.copy_on_escape
    /// metadata.
    unsigned CopyOnEscapeMDKind;

    /// NoObjCARCExceptionsMDKind - The Metadata Kind for
    /// clang.arc.no_objc_arc_exceptions metadata.
    unsigned NoObjCARCExceptionsMDKind;

    Constant *getRetainRVCallee(Module *M);
    Constant *getAutoreleaseRVCallee(Module *M);
    Constant *getReleaseCallee(Module *M);
    Constant *getRetainCallee(Module *M);
    Constant *getRetainBlockCallee(Module *M);
    Constant *getAutoreleaseCallee(Module *M);

    bool IsRetainBlockOptimizable(const Instruction *Inst);

    void OptimizeRetainCall(Function &F, Instruction *Retain);
    bool OptimizeRetainRVCall(Function &F, Instruction *RetainRV);
    void OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV);
    void OptimizeIndividualCalls(Function &F);

    void CheckForCFGHazards(const BasicBlock *BB,
                            DenseMap<const BasicBlock *, BBState> &BBStates,
                            BBState &MyStates) const;
    bool VisitInstructionBottomUp(Instruction *Inst,
                                  BasicBlock *BB,
                                  MapVector<Value *, RRInfo> &Retains,
                                  BBState &MyStates);
    bool VisitBottomUp(BasicBlock *BB,
                       DenseMap<const BasicBlock *, BBState> &BBStates,
                       MapVector<Value *, RRInfo> &Retains);
    bool VisitInstructionTopDown(Instruction *Inst,
                                 DenseMap<Value *, RRInfo> &Releases,
                                 BBState &MyStates);
    bool VisitTopDown(BasicBlock *BB,
                      DenseMap<const BasicBlock *, BBState> &BBStates,
                      DenseMap<Value *, RRInfo> &Releases);
    bool Visit(Function &F,
               DenseMap<const BasicBlock *, BBState> &BBStates,
               MapVector<Value *, RRInfo> &Retains,
               DenseMap<Value *, RRInfo> &Releases);

    void MoveCalls(Value *Arg, RRInfo &RetainsToMove, RRInfo &ReleasesToMove,
                   MapVector<Value *, RRInfo> &Retains,
                   DenseMap<Value *, RRInfo> &Releases,
                   SmallVectorImpl<Instruction *> &DeadInsts,
                   Module *M);

    bool PerformCodePlacement(DenseMap<const BasicBlock *, BBState> &BBStates,
                              MapVector<Value *, RRInfo> &Retains,
                              DenseMap<Value *, RRInfo> &Releases,
                              Module *M);

    void OptimizeWeakCalls(Function &F);

    bool OptimizeSequences(Function &F);

    void OptimizeReturns(Function &F);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
    virtual bool doInitialization(Module &M);
    virtual bool runOnFunction(Function &F);
    virtual void releaseMemory();

  public:
    static char ID;
    ObjCARCOpt() : FunctionPass(ID) {
      initializeObjCARCOptPass(*PassRegistry::getPassRegistry());
    }
  };
}
1763 char ObjCARCOpt::ID = 0;
1764 INITIALIZE_PASS_BEGIN(ObjCARCOpt,
1765 "objc-arc", "ObjC ARC optimization", false, false)
1766 INITIALIZE_PASS_DEPENDENCY(ObjCARCAliasAnalysis)
1767 INITIALIZE_PASS_END(ObjCARCOpt,
1768 "objc-arc", "ObjC ARC optimization", false, false)
1770 Pass *llvm::createObjCARCOptPass() {
1771 return new ObjCARCOpt();
1774 void ObjCARCOpt::getAnalysisUsage(AnalysisUsage &AU) const {
1775 AU.addRequired<ObjCARCAliasAnalysis>();
1776 AU.addRequired<AliasAnalysis>();
1777 // ARC optimization doesn't currently split critical edges.
1778 AU.setPreservesCFG();
1781 bool ObjCARCOpt::IsRetainBlockOptimizable(const Instruction *Inst) {
1782 // Without the magic metadata tag, we have to assume this might be an
1783 // objc_retainBlock call inserted to convert a block pointer to an id,
1784 // in which case it really is needed.
1785 if (!Inst->getMetadata(CopyOnEscapeMDKind))
1788 // If the pointer "escapes" (not including being used in a call),
1789 // the copy may be needed.
1790 if (DoesObjCBlockEscape(Inst))
1793 // Otherwise, it's not needed.
1797 Constant *ObjCARCOpt::getRetainRVCallee(Module *M) {
1798 if (!RetainRVCallee) {
1799 LLVMContext &C = M->getContext();
1800 Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
1801 Type *Params[] = { I8X };
1802 FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
1803 AttributeSet Attribute =
1804 AttributeSet().addAttr(M->getContext(), AttributeSet::FunctionIndex,
1805 Attribute::get(C, Attribute::NoUnwind));
1806 RetainRVCallee =
1807 M->getOrInsertFunction("objc_retainAutoreleasedReturnValue", FTy,
1808                        Attribute);
1810 return RetainRVCallee;
1813 Constant *ObjCARCOpt::getAutoreleaseRVCallee(Module *M) {
1814 if (!AutoreleaseRVCallee) {
1815 LLVMContext &C = M->getContext();
1816 Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
1817 Type *Params[] = { I8X };
1818 FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
1819 AttributeSet Attribute =
1820 AttributeSet().addAttr(M->getContext(), AttributeSet::FunctionIndex,
1821 Attribute::get(C, Attribute::NoUnwind));
1822 AutoreleaseRVCallee =
1823 M->getOrInsertFunction("objc_autoreleaseReturnValue", FTy,
1824                        Attribute);
1826 return AutoreleaseRVCallee;
1829 Constant *ObjCARCOpt::getReleaseCallee(Module *M) {
1830 if (!ReleaseCallee) {
1831 LLVMContext &C = M->getContext();
1832 Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
1833 AttributeSet Attribute =
1834 AttributeSet().addAttr(M->getContext(), AttributeSet::FunctionIndex,
1835 Attribute::get(C, Attribute::NoUnwind));
1836 ReleaseCallee =
1837 M->getOrInsertFunction(
1838   "objc_release",
1839   FunctionType::get(Type::getVoidTy(C), Params, /*isVarArg=*/false),
1840   Attribute);
1842 return ReleaseCallee;
1845 Constant *ObjCARCOpt::getRetainCallee(Module *M) {
1846 if (!RetainCallee) {
1847 LLVMContext &C = M->getContext();
1848 Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
1849 AttributeSet Attribute =
1850 AttributeSet().addAttr(M->getContext(), AttributeSet::FunctionIndex,
1851 Attribute::get(C, Attribute::NoUnwind));
1852 RetainCallee =
1853 M->getOrInsertFunction(
1854   "objc_retain",
1855   FunctionType::get(Params[0], Params, /*isVarArg=*/false),
1856   Attribute);
1858 return RetainCallee;
1861 Constant *ObjCARCOpt::getRetainBlockCallee(Module *M) {
1862 if (!RetainBlockCallee) {
1863 LLVMContext &C = M->getContext();
1864 Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
1865 // objc_retainBlock is not nounwind because it calls user copy constructors
1866 // which could theoretically throw.
1867 RetainBlockCallee =
1868 M->getOrInsertFunction(
1869   "objc_retainBlock",
1870   FunctionType::get(Params[0], Params, /*isVarArg=*/false),
1871   AttributeSet());
1873 return RetainBlockCallee;
1876 Constant *ObjCARCOpt::getAutoreleaseCallee(Module *M) {
1877 if (!AutoreleaseCallee) {
1878 LLVMContext &C = M->getContext();
1879 Type *Params[] = { PointerType::getUnqual(Type::getInt8Ty(C)) };
1880 AttributeSet Attribute =
1881 AttributeSet().addAttr(M->getContext(), AttributeSet::FunctionIndex,
1882 Attribute::get(C, Attribute::NoUnwind));
1883 AutoreleaseCallee =
1884 M->getOrInsertFunction(
1885   "objc_autorelease",
1886   FunctionType::get(Params[0], Params, /*isVarArg=*/false),
1887   Attribute);
1889 return AutoreleaseCallee;
1892 /// IsPotentialUse - Test whether the given value is possibly a
1893 /// reference-counted pointer, including tests which utilize AliasAnalysis.
1894 static bool IsPotentialUse(const Value *Op, AliasAnalysis &AA) {
1895 // First make the rudimentary check.
1896 if (!IsPotentialUse(Op))
1899 // Objects in constant memory are not reference-counted.
1900 if (AA.pointsToConstantMemory(Op))
1903 // Pointers in constant memory are not pointing to reference-counted objects.
1904 if (const LoadInst *LI = dyn_cast<LoadInst>(Op))
1905 if (AA.pointsToConstantMemory(LI->getPointerOperand()))
1908 // Otherwise assume the worst.
1912 /// CanAlterRefCount - Test whether the given instruction can result in a
1913 /// reference count modification (positive or negative) for the pointer's
1915 static bool
1916 CanAlterRefCount(const Instruction *Inst, const Value *Ptr,
1917 ProvenanceAnalysis &PA, InstructionClass Class) {
1919 case IC_Autorelease:
1920 case IC_AutoreleaseRV:
1922 // These operations never directly modify a reference count.
1927 ImmutableCallSite CS = static_cast<const Value *>(Inst);
1928 assert(CS && "Only calls can alter reference counts!");
1930 // See if AliasAnalysis can help us with the call.
1931 AliasAnalysis::ModRefBehavior MRB = PA.getAA()->getModRefBehavior(CS);
1932 if (AliasAnalysis::onlyReadsMemory(MRB))
1934 if (AliasAnalysis::onlyAccessesArgPointees(MRB)) {
1935 for (ImmutableCallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
1937 const Value *Op = *I;
1938 if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
1944 // Assume the worst.
1948 /// CanUse - Test whether the given instruction can "use" the given pointer's
1949 /// object in a way that requires the reference count to be positive.
1950 static bool
1951 CanUse(const Instruction *Inst, const Value *Ptr, ProvenanceAnalysis &PA,
1952 InstructionClass Class) {
1953 // IC_Call operations (as opposed to IC_CallOrUser) never "use" objc pointers.
1954 if (Class == IC_Call)
1957 // Consider various instructions which may have pointer arguments which are
1959 if (const ICmpInst *ICI = dyn_cast<ICmpInst>(Inst)) {
1960 // Comparing a pointer with null, or any other constant, isn't really a use,
1961 // because we don't care what the pointer points to, or about the values
1962 // of any other dynamic reference-counted pointers.
1963 if (!IsPotentialUse(ICI->getOperand(1), *PA.getAA()))
1965 } else if (ImmutableCallSite CS = static_cast<const Value *>(Inst)) {
1966 // For calls, just check the arguments (and not the callee operand).
1967 for (ImmutableCallSite::arg_iterator OI = CS.arg_begin(),
1968 OE = CS.arg_end(); OI != OE; ++OI) {
1969 const Value *Op = *OI;
1970 if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
1974 } else if (const StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
1975 // Special-case stores, because we don't care about the stored value, just
1976 // the store address.
1977 const Value *Op = GetUnderlyingObjCPtr(SI->getPointerOperand());
1978 // If we can't tell what the underlying object was, assume there is a
1980 return IsPotentialUse(Op, *PA.getAA()) && PA.related(Op, Ptr);
1983 // Check each operand for a match.
1984 for (User::const_op_iterator OI = Inst->op_begin(), OE = Inst->op_end();
1986 const Value *Op = *OI;
1987 if (IsPotentialUse(Op, *PA.getAA()) && PA.related(Ptr, Op))
1993 /// CanInterruptRV - Test whether the given instruction can autorelease
1994 /// any pointer or cause an autoreleasepool pop.
1995 static bool
1996 CanInterruptRV(InstructionClass Class) {
1998 case IC_AutoreleasepoolPop:
2001 case IC_Autorelease:
2002 case IC_AutoreleaseRV:
2003 case IC_FusedRetainAutorelease:
2004 case IC_FusedRetainAutoreleaseRV:
2012 /// DependenceKind - There are several kinds of dependence-like concepts in
2013 /// use here.
2014 enum DependenceKind {
2015 NeedsPositiveRetainCount,
2016 AutoreleasePoolBoundary,
2017 CanChangeRetainCount,
2018 RetainAutoreleaseDep, ///< Blocks objc_retainAutorelease.
2019 RetainAutoreleaseRVDep, ///< Blocks objc_retainAutoreleaseReturnValue.
2020 RetainRVDep ///< Blocks objc_retainAutoreleasedReturnValue.
2024 /// Depends - Test if there can be dependencies on Inst through Arg. This
2025 /// function only tests dependencies relevant for removing pairs of calls.
2026 static bool
2027 Depends(DependenceKind Flavor, Instruction *Inst, const Value *Arg,
2028 ProvenanceAnalysis &PA) {
2029 // If we've reached the definition of Arg, stop.
2034 case NeedsPositiveRetainCount: {
2035 InstructionClass Class = GetInstructionClass(Inst);
2037 case IC_AutoreleasepoolPop:
2038 case IC_AutoreleasepoolPush:
2042 return CanUse(Inst, Arg, PA, Class);
2046 case AutoreleasePoolBoundary: {
2047 InstructionClass Class = GetInstructionClass(Inst);
2049 case IC_AutoreleasepoolPop:
2050 case IC_AutoreleasepoolPush:
2051 // These mark the end and begin of an autorelease pool scope.
2054 // Nothing else does this.
2059 case CanChangeRetainCount: {
2060 InstructionClass Class = GetInstructionClass(Inst);
2062 case IC_AutoreleasepoolPop:
2063 // Conservatively assume this can decrement any count.
2065 case IC_AutoreleasepoolPush:
2069 return CanAlterRefCount(Inst, Arg, PA, Class);
2073 case RetainAutoreleaseDep:
2074 switch (GetBasicInstructionClass(Inst)) {
2075 case IC_AutoreleasepoolPop:
2076 case IC_AutoreleasepoolPush:
2077 // Don't merge an objc_autorelease with an objc_retain inside a different
2078 // autoreleasepool scope.
2082 // Check for a retain of the same pointer for merging.
2083 return GetObjCArg(Inst) == Arg;
2085 // Nothing else matters for objc_retainAutorelease formation.
2089 case RetainAutoreleaseRVDep: {
2090 InstructionClass Class = GetBasicInstructionClass(Inst);
2094 // Check for a retain of the same pointer for merging.
2095 return GetObjCArg(Inst) == Arg;
2097 // Anything that can autorelease interrupts
2098 // retainAutoreleaseReturnValue formation.
2099 return CanInterruptRV(Class);
2104 return CanInterruptRV(GetBasicInstructionClass(Inst));
2107 llvm_unreachable("Invalid dependence flavor");
2110 /// FindDependencies - Walk up the CFG from StartPos (which is in StartBB) and
2111 /// find local and non-local dependencies on Arg.
2112 /// TODO: Cache results?
2113 static void
2114 FindDependencies(DependenceKind Flavor,
2115                  const Value *Arg,
2116 BasicBlock *StartBB, Instruction *StartInst,
2117 SmallPtrSet<Instruction *, 4> &DependingInstructions,
2118 SmallPtrSet<const BasicBlock *, 4> &Visited,
2119 ProvenanceAnalysis &PA) {
2120 BasicBlock::iterator StartPos = StartInst;
2122 SmallVector<std::pair<BasicBlock *, BasicBlock::iterator>, 4> Worklist;
2123 Worklist.push_back(std::make_pair(StartBB, StartPos));
2125 std::pair<BasicBlock *, BasicBlock::iterator> Pair =
2126 Worklist.pop_back_val();
2127 BasicBlock *LocalStartBB = Pair.first;
2128 BasicBlock::iterator LocalStartPos = Pair.second;
2129 BasicBlock::iterator StartBBBegin = LocalStartBB->begin();
2131 if (LocalStartPos == StartBBBegin) {
2132 pred_iterator PI(LocalStartBB), PE(LocalStartBB, false);
2134 // If we've reached the function entry, produce a null dependence.
2135 DependingInstructions.insert(0);
2137 // Add the predecessors to the worklist.
2139 BasicBlock *PredBB = *PI;
2140 if (Visited.insert(PredBB))
2141 Worklist.push_back(std::make_pair(PredBB, PredBB->end()));
2142 } while (++PI != PE);
2146 Instruction *Inst = --LocalStartPos;
2147 if (Depends(Flavor, Inst, Arg, PA)) {
2148 DependingInstructions.insert(Inst);
2152 } while (!Worklist.empty());
2154 // Determine whether the original StartBB post-dominates all of the blocks we
2155 // visited. If not, insert a sentinel to indicate that most optimizations are
2156 // not safe.
2157 for (SmallPtrSet<const BasicBlock *, 4>::const_iterator I = Visited.begin(),
2158 E = Visited.end(); I != E; ++I) {
2159 const BasicBlock *BB = *I;
2162 const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
2163 for (succ_const_iterator SI(TI), SE(TI, false); SI != SE; ++SI) {
2164 const BasicBlock *Succ = *SI;
2165 if (Succ != StartBB && !Visited.count(Succ)) {
2166 DependingInstructions.insert(reinterpret_cast<Instruction *>(-1));
2173 static bool isNullOrUndef(const Value *V) {
2174 return isa<ConstantPointerNull>(V) || isa<UndefValue>(V);
2177 static bool isNoopInstruction(const Instruction *I) {
2178 return isa<BitCastInst>(I) ||
2179 (isa<GetElementPtrInst>(I) &&
2180 cast<GetElementPtrInst>(I)->hasAllZeroIndices());
2183 /// OptimizeRetainCall - Turn objc_retain into
2184 /// objc_retainAutoreleasedReturnValue if the operand is a return value.
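///
/// A minimal sketch of the transformation (illustrative IR; @foo is a
/// hypothetical callee):
///
///    %call = call i8* @foo()
///    %0 = call i8* @objc_retain(i8* %call)
///
/// becomes:
///
///    %call = call i8* @foo()
///    %0 = call i8* @objc_retainAutoreleasedReturnValue(i8* %call)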
2185 void
2186 ObjCARCOpt::OptimizeRetainCall(Function &F, Instruction *Retain) {
2187 ImmutableCallSite CS(GetObjCArg(Retain));
2188 const Instruction *Call = CS.getInstruction();
2190 if (Call->getParent() != Retain->getParent()) return;
2192 // Check that the call is next to the retain.
2193 BasicBlock::const_iterator I = Call;
2195 while (isNoopInstruction(I)) ++I;
2199 // Turn it into an objc_retainAutoreleasedReturnValue.
2203 DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainCall: Transforming "
2204 "objc_retain => objc_retainAutoreleasedReturnValue "
2205 "since the operand is a return value.\n"
2206 "                               Old: "
2207 << *Retain << "\n");
2209 cast<CallInst>(Retain)->setCalledFunction(getRetainRVCallee(F.getParent()));
2211 DEBUG(dbgs() << " New: "
2212 << *Retain << "\n");
2215 /// OptimizeRetainRVCall - Turn objc_retainAutoreleasedReturnValue into
2216 /// objc_retain if the operand is not a return value. Or, if it can be paired
2217 /// with an objc_autoreleaseReturnValue, delete the pair and return true.
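///
/// A sketch of the pair-deletion case (illustrative IR):
///
///    %0 = call i8* @objc_autoreleaseReturnValue(i8* %x)
///    %1 = call i8* @objc_retainAutoreleasedReturnValue(i8* %x)
///
/// The autorelease and the retain cancel out, so both calls are erased.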
2218 bool
2219 ObjCARCOpt::OptimizeRetainRVCall(Function &F, Instruction *RetainRV) {
2220 // Check for the argument being from an immediately preceding call or invoke.
2221 const Value *Arg = GetObjCArg(RetainRV);
2222 ImmutableCallSite CS(Arg);
2223 if (const Instruction *Call = CS.getInstruction()) {
2224 if (Call->getParent() == RetainRV->getParent()) {
2225 BasicBlock::const_iterator I = Call;
2227 while (isNoopInstruction(I)) ++I;
2228 if (&*I == RetainRV)
2230 } else if (const InvokeInst *II = dyn_cast<InvokeInst>(Call)) {
2231 BasicBlock *RetainRVParent = RetainRV->getParent();
2232 if (II->getNormalDest() == RetainRVParent) {
2233 BasicBlock::const_iterator I = RetainRVParent->begin();
2234 while (isNoopInstruction(I)) ++I;
2235 if (&*I == RetainRV)
2241 // Check for being preceded by an objc_autoreleaseReturnValue on the same
2242 // pointer. In this case, we can delete the pair.
2243 BasicBlock::iterator I = RetainRV, Begin = RetainRV->getParent()->begin();
2245 do --I; while (I != Begin && isNoopInstruction(I));
2246 if (GetBasicInstructionClass(I) == IC_AutoreleaseRV &&
2247 GetObjCArg(I) == Arg) {
2251 DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainRVCall: Erasing " << *I << "\n"
2252 << " Erasing " << *RetainRV
2255 EraseInstruction(I);
2256 EraseInstruction(RetainRV);
2261 // Turn it into a plain objc_retain.
2265 DEBUG(dbgs() << "ObjCARCOpt::OptimizeRetainRVCall: Transforming "
2266 "objc_retainAutoreleasedReturnValue => "
2267 "objc_retain since the operand is not a return value.\n"
2269 << *RetainRV << "\n");
2271 cast<CallInst>(RetainRV)->setCalledFunction(getRetainCallee(F.getParent()));
2273 DEBUG(dbgs() << " New: "
2274 << *RetainRV << "\n");
2279 /// OptimizeAutoreleaseRVCall - Turn objc_autoreleaseReturnValue into
2280 /// objc_autorelease if the result is not used as a return value.
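///
/// For example (illustrative IR), when %x never reaches a ret:
///
///    %0 = call i8* @objc_autoreleaseReturnValue(i8* %x)
///
/// is rewritten to:
///
///    %0 = call i8* @objc_autorelease(i8* %x)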
2281 void
2282 ObjCARCOpt::OptimizeAutoreleaseRVCall(Function &F, Instruction *AutoreleaseRV) {
2283 // Check for a return of the pointer value.
2284 const Value *Ptr = GetObjCArg(AutoreleaseRV);
2285 SmallVector<const Value *, 2> Users;
2286 Users.push_back(Ptr);
2288 Ptr = Users.pop_back_val();
2289 for (Value::const_use_iterator UI = Ptr->use_begin(), UE = Ptr->use_end();
2291 const User *I = *UI;
2292 if (isa<ReturnInst>(I) || GetBasicInstructionClass(I) == IC_RetainRV)
2294 if (isa<BitCastInst>(I))
2297 } while (!Users.empty());
2301 cast<CallInst>(AutoreleaseRV)->
2302 setCalledFunction(getAutoreleaseCallee(F.getParent()));
2305 /// OptimizeIndividualCalls - Visit each call, one at a time, and make
2306 /// simplifications without doing any additional analysis.
2307 void ObjCARCOpt::OptimizeIndividualCalls(Function &F) {
2308 // Reset all the flags in preparation for recomputing them.
2309 UsedInThisFunction = 0;
2311 // Visit all objc_* calls in F.
2312 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
2313 Instruction *Inst = &*I++;
2315 DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Visiting: " <<
2318 InstructionClass Class = GetBasicInstructionClass(Inst);
2323 // Delete no-op casts. These function calls have special semantics, but
2324 // the semantics are entirely implemented via lowering in the front-end,
2325 // so by the time they reach the optimizer, they are just no-op calls
2326 // which return their argument.
2328 // There are gray areas here, as the ability to cast reference-counted
2329 // pointers to raw void* and back allows code to break ARC assumptions;
2330 // however, these are currently considered to be unimportant.
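// For example (illustrative IR), a no-op cast such as
//    %0 = call i8* @objc_retainedObject(i8* %x)
// simply returns its operand, so uses of %0 are rewritten to %x and the
// call is erased.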
2334 EraseInstruction(Inst);
2337 // If the pointer-to-weak-pointer is null, it's undefined behavior.
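// For example (illustrative IR), a call such as
//    call void @objc_destroyWeak(i8** null)
// is replaced by a store of undef through the null pointer, which keeps the
// undefined behavior explicit while letting later passes delete the path.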
2340 case IC_LoadWeakRetained:
2342 case IC_DestroyWeak: {
2343 CallInst *CI = cast<CallInst>(Inst);
2344 if (isNullOrUndef(CI->getArgOperand(0))) {
2346 Type *Ty = CI->getArgOperand(0)->getType();
2347 new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
2348 Constant::getNullValue(Ty),
2349 CI);
2350 CI->replaceAllUsesWith(UndefValue::get(CI->getType()));
2351 CI->eraseFromParent();
2358 CallInst *CI = cast<CallInst>(Inst);
2359 if (isNullOrUndef(CI->getArgOperand(0)) ||
2360 isNullOrUndef(CI->getArgOperand(1))) {
2362 Type *Ty = CI->getArgOperand(0)->getType();
2363 new StoreInst(UndefValue::get(cast<PointerType>(Ty)->getElementType()),
2364 Constant::getNullValue(Ty),
2365 CI);
2366 CI->replaceAllUsesWith(UndefValue::get(CI->getType()));
2367 CI->eraseFromParent();
2373 OptimizeRetainCall(F, Inst);
2376 if (OptimizeRetainRVCall(F, Inst))
2379 case IC_AutoreleaseRV:
2380 OptimizeAutoreleaseRVCall(F, Inst);
2384 // objc_autorelease(x) -> objc_release(x) if x is otherwise unused.
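// For example (illustrative IR), when %x is a single-use identified object:
//    call i8* @objc_autorelease(i8* %x)
// becomes a release tagged with clang.imprecise_release metadata:
//    call void @objc_release(i8* %x), !clang.imprecise_release !0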
2385 if (IsAutorelease(Class) && Inst->use_empty()) {
2386 CallInst *Call = cast<CallInst>(Inst);
2387 const Value *Arg = Call->getArgOperand(0);
2388 Arg = FindSingleUseIdentifiedObject(Arg);
2393 // Create the declaration lazily.
2394 LLVMContext &C = Inst->getContext();
2396 CallInst *NewCall = CallInst::Create(getReleaseCallee(F.getParent()),
2397 Call->getArgOperand(0), "", Call);
2398 NewCall->setMetadata(ImpreciseReleaseMDKind,
2399 MDNode::get(C, ArrayRef<Value *>()));
2400 EraseInstruction(Call);
2406 // For functions which can never be passed stack arguments, add
2407 // the "tail" keyword.
2408 if (IsAlwaysTail(Class)) {
2410 cast<CallInst>(Inst)->setTailCall();
2413 // Set nounwind as needed.
2414 if (IsNoThrow(Class)) {
2416 cast<CallInst>(Inst)->setDoesNotThrow();
2419 if (!IsNoopOnNull(Class)) {
2420 UsedInThisFunction |= 1 << Class;
2424 const Value *Arg = GetObjCArg(Inst);
2426 // ARC calls with null are no-ops. Delete them.
2427 if (isNullOrUndef(Arg)) {
2430 EraseInstruction(Inst);
2434 // Keep track of which of retain, release, autorelease, and retain_block
2435 // are actually present in this function.
2436 UsedInThisFunction |= 1 << Class;
2438 // If Arg is a PHI, and one or more incoming values to the
2439 // PHI are null, and the call is control-equivalent to the PHI, and there
2440 // are no relevant side effects between the PHI and the call, the call
2441 // could be pushed up to just those paths with non-null incoming values.
2442 // For now, don't bother splitting critical edges for this.
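// A sketch of the intent (illustrative IR): given
//    %p = phi i8* [ null, %bb1 ], [ %x, %bb2 ]
//    %0 = call i8* @objc_retain(i8* %p)
// the call is a no-op along the null edge, so it can be cloned into %bb2
// (operating on %x directly) and the original call erased.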
2443 SmallVector<std::pair<Instruction *, const Value *>, 4> Worklist;
2444 Worklist.push_back(std::make_pair(Inst, Arg));
2446 std::pair<Instruction *, const Value *> Pair = Worklist.pop_back_val();
2450 const PHINode *PN = dyn_cast<PHINode>(Arg);
2453 // Determine if the PHI has any null operands, or any incoming
2454 // critical edges.
2455 bool HasNull = false;
2456 bool HasCriticalEdges = false;
2457 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
2459 StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
2460 if (isNullOrUndef(Incoming))
2462 else if (cast<TerminatorInst>(PN->getIncomingBlock(i)->back())
2463 .getNumSuccessors() != 1) {
2464 HasCriticalEdges = true;
2468 // If we have null operands and no critical edges, optimize.
2469 if (!HasCriticalEdges && HasNull) {
2470 SmallPtrSet<Instruction *, 4> DependingInstructions;
2471 SmallPtrSet<const BasicBlock *, 4> Visited;
2473 // Check that there is nothing that cares about the reference
2474 // count between the call and the phi.
2477 case IC_RetainBlock:
2478 // These can always be moved up.
2481 // These can't be moved across things that care about the retain
2482 // count.
2483 FindDependencies(NeedsPositiveRetainCount, Arg,
2484 Inst->getParent(), Inst,
2485 DependingInstructions, Visited, PA);
2487 case IC_Autorelease:
2488 // These can't be moved across autorelease pool scope boundaries.
2489 FindDependencies(AutoreleasePoolBoundary, Arg,
2490 Inst->getParent(), Inst,
2491 DependingInstructions, Visited, PA);
2494 case IC_AutoreleaseRV:
2495 // Don't move these; the RV optimization depends on the autoreleaseRV
2496 // being tail called, and the retainRV being immediately after a call
2497 // (which might still happen if we get lucky with codegen layout, but
2498 // it's not worth taking the chance).
2501 llvm_unreachable("Invalid dependence flavor");
2504 if (DependingInstructions.size() == 1 &&
2505 *DependingInstructions.begin() == PN) {
2508 // Clone the call into each predecessor that has a non-null value.
2509 CallInst *CInst = cast<CallInst>(Inst);
2510 Type *ParamTy = CInst->getArgOperand(0)->getType();
2511 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
2513 StripPointerCastsAndObjCCalls(PN->getIncomingValue(i));
2514 if (!isNullOrUndef(Incoming)) {
2515 CallInst *Clone = cast<CallInst>(CInst->clone());
2516 Value *Op = PN->getIncomingValue(i);
2517 Instruction *InsertPos = &PN->getIncomingBlock(i)->back();
2518 if (Op->getType() != ParamTy)
2519 Op = new BitCastInst(Op, ParamTy, "", InsertPos);
2520 Clone->setArgOperand(0, Op);
2521 Clone->insertBefore(InsertPos);
2522 Worklist.push_back(std::make_pair(Clone, Incoming));
2525 // Erase the original call.
2526 EraseInstruction(CInst);
2530 } while (!Worklist.empty());
2532 DEBUG(dbgs() << "ObjCARCOpt::OptimizeIndividualCalls: Finished Queue.\n\n");
2537 /// CheckForCFGHazards - Check for critical edges, loop boundaries, irreducible
2538 /// control flow, or other CFG structures where moving code across the edge
2539 /// would result in it being executed more.
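///
/// A sketch of the hazard (illustrative IR): given a retain above a loop
/// whose body uses the same pointer,
///
///    %0 = call i8* @objc_retain(i8* %x)
///    br label %loop
///
/// sinking the retain into %loop would make it execute once per iteration,
/// so the sequence for %x is forgotten instead of moved.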
2540 void
2541 ObjCARCOpt::CheckForCFGHazards(const BasicBlock *BB,
2542 DenseMap<const BasicBlock *, BBState> &BBStates,
2543 BBState &MyStates) const {
2544 // If any top-down local use (S_Use) or possible decrement (S_CanRelease)
2545 // has a successor which is earlier in the sequence, forget it.
2546 for (BBState::ptr_iterator I = MyStates.top_down_ptr_begin(),
2547 E = MyStates.top_down_ptr_end(); I != E; ++I)
2548 switch (I->second.GetSeq()) {
2551 const Value *Arg = I->first;
2552 const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
2553 bool SomeSuccHasSame = false;
2554 bool AllSuccsHaveSame = true;
2555 PtrState &S = I->second;
2556 succ_const_iterator SI(TI), SE(TI, false);
2558 // If the terminator is an invoke marked with the
2559 // clang.arc.no_objc_arc_exceptions metadata, the unwind edge can be
2560 // ignored, for ARC purposes.
2561 if (isa<InvokeInst>(TI) && TI->getMetadata(NoObjCARCExceptionsMDKind))
2564 for (; SI != SE; ++SI) {
2565 Sequence SuccSSeq = S_None;
2566 bool SuccSRRIKnownSafe = false;
2567 // If VisitBottomUp has pointer information for this successor, take
2568 // what we know about it.
2569 DenseMap<const BasicBlock *, BBState>::iterator BBI =
2570 BBStates.find(*SI);
2571 assert(BBI != BBStates.end());
2572 const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
2573 SuccSSeq = SuccS.GetSeq();
2574 SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
2577 case S_CanRelease: {
2578 if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
2579 S.ClearSequenceProgress();
2585 SomeSuccHasSame = true;
2589 case S_MovableRelease:
2590 if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
2591 AllSuccsHaveSame = false;
2594 llvm_unreachable("bottom-up pointer in retain state!");
2597 // If the state at the other end of any of the successor edges
2598 // matches the current state, require all edges to match. This
2599 // guards against loops in the middle of a sequence.
2600 if (SomeSuccHasSame && !AllSuccsHaveSame)
2601 S.ClearSequenceProgress();
2604 case S_CanRelease: {
2605 const Value *Arg = I->first;
2606 const TerminatorInst *TI = cast<TerminatorInst>(&BB->back());
2607 bool SomeSuccHasSame = false;
2608 bool AllSuccsHaveSame = true;
2609 PtrState &S = I->second;
2610 succ_const_iterator SI(TI), SE(TI, false);
2612 // If the terminator is an invoke marked with the
2613 // clang.arc.no_objc_arc_exceptions metadata, the unwind edge can be
2614 // ignored, for ARC purposes.
2615 if (isa<InvokeInst>(TI) && TI->getMetadata(NoObjCARCExceptionsMDKind))
2618 for (; SI != SE; ++SI) {
2619 Sequence SuccSSeq = S_None;
2620 bool SuccSRRIKnownSafe = false;
2621 // If VisitBottomUp has pointer information for this successor, take
2622 // what we know about it.
2623 DenseMap<const BasicBlock *, BBState>::iterator BBI =
2624 BBStates.find(*SI);
2625 assert(BBI != BBStates.end());
2626 const PtrState &SuccS = BBI->second.getPtrBottomUpState(Arg);
2627 SuccSSeq = SuccS.GetSeq();
2628 SuccSRRIKnownSafe = SuccS.RRI.KnownSafe;
2631 if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe) {
2632 S.ClearSequenceProgress();
2638 SomeSuccHasSame = true;
2642 case S_MovableRelease:
2644 if (!S.RRI.KnownSafe && !SuccSRRIKnownSafe)
2645 AllSuccsHaveSame = false;
2648 llvm_unreachable("bottom-up pointer in retain state!");
2651 // If the state at the other end of any of the successor edges
2652 // matches the current state, require all edges to match. This
2653 // guards against loops in the middle of a sequence.
2654 if (SomeSuccHasSame && !AllSuccsHaveSame)
2655 S.ClearSequenceProgress();
2661 bool
2662 ObjCARCOpt::VisitInstructionBottomUp(Instruction *Inst,
2663                                      BasicBlock *BB,
2664 MapVector<Value *, RRInfo> &Retains,
2665 BBState &MyStates) {
2666 bool NestingDetected = false;
2667 InstructionClass Class = GetInstructionClass(Inst);
2668 const Value *Arg = 0;
2672 Arg = GetObjCArg(Inst);
2674 PtrState &S = MyStates.getPtrBottomUpState(Arg);
2676 // Check whether we see two releases in a row on the same pointer. If so,
2677 // make a note, and we'll circle back to revisit it after we've
2678 // hopefully eliminated the second release, which may allow us to
2679 // eliminate the first release too.
2680 // Theoretically we could implement removal of nested retain+release
2681 // pairs by making PtrState hold a stack of states, but this is
2682 // simple and avoids adding overhead for the non-nested case.
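// For example (illustrative IR), when scanning bottom-up through
//    call void @objc_release(i8* %x)
//    call void @objc_release(i8* %x)
// the earlier release is visited while %x is already in a release state,
// which sets NestingDetected so the analysis can make another pass.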
2683 if (S.GetSeq() == S_Release || S.GetSeq() == S_MovableRelease)
2684 NestingDetected = true;
2686 MDNode *ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
2687 S.ResetSequenceProgress(ReleaseMetadata ? S_MovableRelease : S_Release);
2688 S.RRI.ReleaseMetadata = ReleaseMetadata;
2689 S.RRI.KnownSafe = S.IsKnownIncremented();
2690 S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
2691 S.RRI.Calls.insert(Inst);
2693 S.SetKnownPositiveRefCount();
2696 case IC_RetainBlock:
2697 // An objc_retainBlock call with just a use may need to be kept,
2698 // because it may be copying a block from the stack to the heap.
2699 if (!IsRetainBlockOptimizable(Inst))
2704 Arg = GetObjCArg(Inst);
2706 PtrState &S = MyStates.getPtrBottomUpState(Arg);
2707 S.SetKnownPositiveRefCount();
2709 switch (S.GetSeq()) {
2712 case S_MovableRelease:
2714 S.RRI.ReverseInsertPts.clear();
2717 // Don't do retain+release tracking for IC_RetainRV, because it's
2718 // better to let it remain as the first instruction after a call.
2719 if (Class != IC_RetainRV) {
2720 S.RRI.IsRetainBlock = Class == IC_RetainBlock;
2721 Retains[Inst] = S.RRI;
2723 S.ClearSequenceProgress();
2728 llvm_unreachable("bottom-up pointer in retain state!");
2730 return NestingDetected;
2732 case IC_AutoreleasepoolPop:
2733 // Conservatively, clear MyStates for all known pointers.
2734 MyStates.clearBottomUpPointers();
2735 return NestingDetected;
2736 case IC_AutoreleasepoolPush:
2738 // These are irrelevant.
2739 return NestingDetected;
2744 // Consider any other possible effects of this instruction on each
2745 // pointer being tracked.
2746 for (BBState::ptr_iterator MI = MyStates.bottom_up_ptr_begin(),
2747 ME = MyStates.bottom_up_ptr_end(); MI != ME; ++MI) {
2748 const Value *Ptr = MI->first;
2750 continue; // Handled above.
2751 PtrState &S = MI->second;
2752 Sequence Seq = S.GetSeq();
2754 // Check for possible releases.
2755 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2759 S.SetSeq(S_CanRelease);
2763 case S_MovableRelease:
2768 llvm_unreachable("bottom-up pointer in retain state!");
2772 // Check for possible direct uses.
2775 case S_MovableRelease:
2776 if (CanUse(Inst, Ptr, PA, Class)) {
2777 assert(S.RRI.ReverseInsertPts.empty());
2778 // If this is an invoke instruction, we're scanning it as part of
2779 // one of its successor blocks, since we can't insert code after it
2780 // in its own block, and we don't want to split critical edges.
2781 if (isa<InvokeInst>(Inst))
2782 S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
2784 S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
2786 } else if (Seq == S_Release &&
2787 (Class == IC_User || Class == IC_CallOrUser)) {
2788 // Non-movable releases depend on any possible objc pointer use.
2790 assert(S.RRI.ReverseInsertPts.empty());
2791 // As above; handle invoke specially.
2792 if (isa<InvokeInst>(Inst))
2793 S.RRI.ReverseInsertPts.insert(BB->getFirstInsertionPt());
2795 S.RRI.ReverseInsertPts.insert(llvm::next(BasicBlock::iterator(Inst)));
2799 if (CanUse(Inst, Ptr, PA, Class))
2807 llvm_unreachable("bottom-up pointer in retain state!");
2811 return NestingDetected;
2814 bool
2815 ObjCARCOpt::VisitBottomUp(BasicBlock *BB,
2816 DenseMap<const BasicBlock *, BBState> &BBStates,
2817 MapVector<Value *, RRInfo> &Retains) {
2818 bool NestingDetected = false;
2819 BBState &MyStates = BBStates[BB];
2821 // Merge the states from each successor to compute the initial state
2822 // for the current block.
2823 BBState::edge_iterator SI(MyStates.succ_begin()),
2824 SE(MyStates.succ_end());
2826 const BasicBlock *Succ = *SI;
2827 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Succ);
2828 assert(I != BBStates.end());
2829 MyStates.InitFromSucc(I->second);
2831 for (; SI != SE; ++SI) {
2833 I = BBStates.find(Succ);
2834 assert(I != BBStates.end());
2835 MyStates.MergeSucc(I->second);
2839 // Visit all the instructions, bottom-up.
2840 for (BasicBlock::iterator I = BB->end(), E = BB->begin(); I != E; --I) {
2841 Instruction *Inst = llvm::prior(I);
2843 // Invoke instructions are visited as part of their successors (below).
2844 if (isa<InvokeInst>(Inst))
2847 NestingDetected |= VisitInstructionBottomUp(Inst, BB, Retains, MyStates);
2850 // If there's a predecessor with an invoke, visit the invoke as if it were
2851 // part of this block, since we can't insert code after an invoke in its own
2852 // block, and we don't want to split critical edges.
2853 for (BBState::edge_iterator PI(MyStates.pred_begin()),
2854 PE(MyStates.pred_end()); PI != PE; ++PI) {
2855 BasicBlock *Pred = *PI;
2856 if (InvokeInst *II = dyn_cast<InvokeInst>(&Pred->back()))
2857 NestingDetected |= VisitInstructionBottomUp(II, BB, Retains, MyStates);
2860 return NestingDetected;
2863 bool
2864 ObjCARCOpt::VisitInstructionTopDown(Instruction *Inst,
2865 DenseMap<Value *, RRInfo> &Releases,
2866 BBState &MyStates) {
2867 bool NestingDetected = false;
2868 InstructionClass Class = GetInstructionClass(Inst);
2869 const Value *Arg = 0;
2872 case IC_RetainBlock:
2873 // An objc_retainBlock call with just a use may need to be kept,
2874 // because it may be copying a block from the stack to the heap.
2875 if (!IsRetainBlockOptimizable(Inst))
2880 Arg = GetObjCArg(Inst);
2882 PtrState &S = MyStates.getPtrTopDownState(Arg);
2884 // Don't do retain+release tracking for IC_RetainRV, because it's
2885 // better to let it remain as the first instruction after a call.
2886 if (Class != IC_RetainRV) {
2887 // Check whether we see two retains in a row on the same pointer. If so,
2888 // make a note, and we'll circle back to revisit it after we've
2889 // hopefully eliminated the second retain, which may allow us to
2890 // eliminate the first retain too.
2891 // Theoretically we could implement removal of nested retain+release
2892 // pairs by making PtrState hold a stack of states, but this is
2893 // simple and avoids adding overhead for the non-nested case.
2894 if (S.GetSeq() == S_Retain)
2895 NestingDetected = true;
2897 S.ResetSequenceProgress(S_Retain);
2898 S.RRI.IsRetainBlock = Class == IC_RetainBlock;
2899 S.RRI.KnownSafe = S.IsKnownIncremented();
2900 S.RRI.Calls.insert(Inst);
2903 S.SetKnownPositiveRefCount();
2905 // A retain can be a potential use; proceed to the generic checking
2906 // code below.
2910 Arg = GetObjCArg(Inst);
2912 PtrState &S = MyStates.getPtrTopDownState(Arg);
2915 switch (S.GetSeq()) {
2918 S.RRI.ReverseInsertPts.clear();
2921 S.RRI.ReleaseMetadata = Inst->getMetadata(ImpreciseReleaseMDKind);
2922 S.RRI.IsTailCallRelease = cast<CallInst>(Inst)->isTailCall();
2923 Releases[Inst] = S.RRI;
2924 S.ClearSequenceProgress();
2930 case S_MovableRelease:
2931 llvm_unreachable("top-down pointer in release state!");
2935 case IC_AutoreleasepoolPop:
2936 // Conservatively, clear MyStates for all known pointers.
2937 MyStates.clearTopDownPointers();
2938 return NestingDetected;
2939 case IC_AutoreleasepoolPush:
2941 // These are irrelevant.
2942 return NestingDetected;
2947 // Consider any other possible effects of this instruction on each
2948 // pointer being tracked.
2949 for (BBState::ptr_iterator MI = MyStates.top_down_ptr_begin(),
2950 ME = MyStates.top_down_ptr_end(); MI != ME; ++MI) {
2951 const Value *Ptr = MI->first;
2953 continue; // Handled above.
2954 PtrState &S = MI->second;
2955 Sequence Seq = S.GetSeq();
2957 // Check for possible releases.
2958 if (CanAlterRefCount(Inst, Ptr, PA, Class)) {
2962 S.SetSeq(S_CanRelease);
2963 assert(S.RRI.ReverseInsertPts.empty());
2964 S.RRI.ReverseInsertPts.insert(Inst);
2966 // One call can't cause a transition from S_Retain to S_CanRelease
2967 // and S_CanRelease to S_Use. If we've made the first transition,
2968 // we're done.
2976 case S_MovableRelease:
2977 llvm_unreachable("top-down pointer in release state!");
2981 // Check for possible direct uses.
2984 if (CanUse(Inst, Ptr, PA, Class))
2993 case S_MovableRelease:
2994 llvm_unreachable("top-down pointer in release state!");
2998 return NestingDetected;
3001 bool
3002 ObjCARCOpt::VisitTopDown(BasicBlock *BB,
3003 DenseMap<const BasicBlock *, BBState> &BBStates,
3004 DenseMap<Value *, RRInfo> &Releases) {
3005 bool NestingDetected = false;
3006 BBState &MyStates = BBStates[BB];
3008 // Merge the states from each predecessor to compute the initial state
3009 // for the current block.
3010 BBState::edge_iterator PI(MyStates.pred_begin()),
3011 PE(MyStates.pred_end());
3013 const BasicBlock *Pred = *PI;
3014 DenseMap<const BasicBlock *, BBState>::iterator I = BBStates.find(Pred);
3015 assert(I != BBStates.end());
3016 MyStates.InitFromPred(I->second);
3018 for (; PI != PE; ++PI) {
3020 I = BBStates.find(Pred);
3021 assert(I != BBStates.end());
3022 MyStates.MergePred(I->second);
3026 // Visit all the instructions, top-down.
3027 for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
3028 Instruction *Inst = I;
3029 NestingDetected |= VisitInstructionTopDown(Inst, Releases, MyStates);
3032 CheckForCFGHazards(BB, BBStates, MyStates);
3033 return NestingDetected;
3036 static void
3037 ComputePostOrders(Function &F,
3038 SmallVectorImpl<BasicBlock *> &PostOrder,
3039 SmallVectorImpl<BasicBlock *> &ReverseCFGPostOrder,
3040 unsigned NoObjCARCExceptionsMDKind,
3041 DenseMap<const BasicBlock *, BBState> &BBStates) {
3042 /// Visited - The visited set, for doing DFS walks.
3043 SmallPtrSet<BasicBlock *, 16> Visited;
3045 // Do DFS, computing the PostOrder.
3046 SmallPtrSet<BasicBlock *, 16> OnStack;
3047 SmallVector<std::pair<BasicBlock *, succ_iterator>, 16> SuccStack;
3049 // Functions always have exactly one entry block, and we don't have
3050 // any other block that we treat like an entry block.
3051 BasicBlock *EntryBB = &F.getEntryBlock();
3052 BBState &MyStates = BBStates[EntryBB];
3053 MyStates.SetAsEntry();
3054 TerminatorInst *EntryTI = cast<TerminatorInst>(&EntryBB->back());
3055 SuccStack.push_back(std::make_pair(EntryBB, succ_iterator(EntryTI)));
3056 Visited.insert(EntryBB);
3057 OnStack.insert(EntryBB);
3060 BasicBlock *CurrBB = SuccStack.back().first;
3061 TerminatorInst *TI = cast<TerminatorInst>(&CurrBB->back());
3062 succ_iterator SE(TI, false);
3064 // If the terminator is an invoke marked with the
3065 // clang.arc.no_objc_arc_exceptions metadata, the unwind edge can be
3066 // ignored, for ARC purposes.
3067 if (isa<InvokeInst>(TI) && TI->getMetadata(NoObjCARCExceptionsMDKind))
3070 while (SuccStack.back().second != SE) {
3071 BasicBlock *SuccBB = *SuccStack.back().second++;
3072 if (Visited.insert(SuccBB)) {
3073 TerminatorInst *TI = cast<TerminatorInst>(&SuccBB->back());
3074 SuccStack.push_back(std::make_pair(SuccBB, succ_iterator(TI)));
3075 BBStates[CurrBB].addSucc(SuccBB);
3076 BBState &SuccStates = BBStates[SuccBB];
3077 SuccStates.addPred(CurrBB);
3078 OnStack.insert(SuccBB);
3082 if (!OnStack.count(SuccBB)) {
3083 BBStates[CurrBB].addSucc(SuccBB);
3084 BBStates[SuccBB].addPred(CurrBB);
3087 OnStack.erase(CurrBB);
3088 PostOrder.push_back(CurrBB);
3089 SuccStack.pop_back();
3090 } while (!SuccStack.empty());
3094 // Do reverse-CFG DFS, computing the reverse-CFG PostOrder.
3095 // Functions may have many exits, and there are also blocks which we treat
3096 // as exits due to ignored edges.
3097 SmallVector<std::pair<BasicBlock *, BBState::edge_iterator>, 16> PredStack;
3098 for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I) {
3099 BasicBlock *ExitBB = I;
3100 BBState &MyStates = BBStates[ExitBB];
3101 if (!MyStates.isExit())
3104 MyStates.SetAsExit();
3106 PredStack.push_back(std::make_pair(ExitBB, MyStates.pred_begin()));
3107 Visited.insert(ExitBB);
3108 while (!PredStack.empty()) {
3109 reverse_dfs_next_succ:
3110 BBState::edge_iterator PE = BBStates[PredStack.back().first].pred_end();
3111 while (PredStack.back().second != PE) {
3112 BasicBlock *BB = *PredStack.back().second++;
3113 if (Visited.insert(BB)) {
3114 PredStack.push_back(std::make_pair(BB, BBStates[BB].pred_begin()));
3115 goto reverse_dfs_next_succ;
3118 ReverseCFGPostOrder.push_back(PredStack.pop_back_val().first);
3123 /// Visit - Visit the function both top-down and bottom-up.
3124 bool
3125 ObjCARCOpt::Visit(Function &F,
3126 DenseMap<const BasicBlock *, BBState> &BBStates,
3127 MapVector<Value *, RRInfo> &Retains,
3128 DenseMap<Value *, RRInfo> &Releases) {
3130 // Use reverse-postorder traversals, because we magically know that loops
3131 // will be well behaved, i.e. they won't repeatedly call retain on a single
3132 // pointer without doing a release. We can't use the ReversePostOrderTraversal
3133 // class here because we want the reverse-CFG postorder to consider each
3134 // function exit point, and we want to ignore selected cycle edges.
3135 SmallVector<BasicBlock *, 16> PostOrder;
3136 SmallVector<BasicBlock *, 16> ReverseCFGPostOrder;
3137 ComputePostOrders(F, PostOrder, ReverseCFGPostOrder,
3138 NoObjCARCExceptionsMDKind,
3139 BBStates);
3141 // Use reverse-postorder on the reverse CFG for bottom-up.
3142 bool BottomUpNestingDetected = false;
3143 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
3144 ReverseCFGPostOrder.rbegin(), E = ReverseCFGPostOrder.rend();
3146 BottomUpNestingDetected |= VisitBottomUp(*I, BBStates, Retains);
3148 // Use reverse-postorder for top-down.
3149 bool TopDownNestingDetected = false;
3150 for (SmallVectorImpl<BasicBlock *>::const_reverse_iterator I =
3151 PostOrder.rbegin(), E = PostOrder.rend();
3153 TopDownNestingDetected |= VisitTopDown(*I, BBStates, Releases);
3155 return TopDownNestingDetected && BottomUpNestingDetected;
3158 /// MoveCalls - Move the calls in RetainsToMove and ReleasesToMove.
3159 void ObjCARCOpt::MoveCalls(Value *Arg,
3160 RRInfo &RetainsToMove,
3161 RRInfo &ReleasesToMove,
3162 MapVector<Value *, RRInfo> &Retains,
3163 DenseMap<Value *, RRInfo> &Releases,
3164 SmallVectorImpl<Instruction *> &DeadInsts,
3165 Module *M) {
3166 Type *ArgTy = Arg->getType();
3167 Type *ParamTy = PointerType::getUnqual(Type::getInt8Ty(ArgTy->getContext()));
3169 // Insert the new retain and release calls.
3170 for (SmallPtrSet<Instruction *, 2>::const_iterator
3171 PI = ReleasesToMove.ReverseInsertPts.begin(),
3172 PE = ReleasesToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
3173 Instruction *InsertPt = *PI;
3174 Value *MyArg = ArgTy == ParamTy ? Arg :
3175 new BitCastInst(Arg, ParamTy, "", InsertPt);
3177 CallInst::Create(RetainsToMove.IsRetainBlock ?
3178 getRetainBlockCallee(M) : getRetainCallee(M),
3179 MyArg, "", InsertPt);
3180 Call->setDoesNotThrow();
3181 if (RetainsToMove.IsRetainBlock)
3182 Call->setMetadata(CopyOnEscapeMDKind,
3183 MDNode::get(M->getContext(), ArrayRef<Value *>()));
3185 Call->setTailCall();
3187 for (SmallPtrSet<Instruction *, 2>::const_iterator
3188 PI = RetainsToMove.ReverseInsertPts.begin(),
3189 PE = RetainsToMove.ReverseInsertPts.end(); PI != PE; ++PI) {
3190 Instruction *InsertPt = *PI;
3191 Value *MyArg = ArgTy == ParamTy ? Arg :
3192 new BitCastInst(Arg, ParamTy, "", InsertPt);
3193 CallInst *Call = CallInst::Create(getReleaseCallee(M), MyArg,
3195 // Attach a clang.imprecise_release metadata tag, if appropriate.
3196 if (MDNode *MD = ReleasesToMove.ReleaseMetadata)
3197   Call->setMetadata(ImpreciseReleaseMDKind, MD);
3198 Call->setDoesNotThrow();
3199 if (ReleasesToMove.IsTailCallRelease)
3200 Call->setTailCall();
3203 // Delete the original retain and release calls.
3204 for (SmallPtrSet<Instruction *, 2>::const_iterator
3205 AI = RetainsToMove.Calls.begin(),
3206 AE = RetainsToMove.Calls.end(); AI != AE; ++AI) {
3207 Instruction *OrigRetain = *AI;
3208 Retains.blot(OrigRetain);
3209 DeadInsts.push_back(OrigRetain);
3211 for (SmallPtrSet<Instruction *, 2>::const_iterator
3212 AI = ReleasesToMove.Calls.begin(),
3213 AE = ReleasesToMove.Calls.end(); AI != AE; ++AI) {
3214 Instruction *OrigRelease = *AI;
3215 Releases.erase(OrigRelease);
3216 DeadInsts.push_back(OrigRelease);
3220 /// PerformCodePlacement - Identify pairings between the retains and releases,
3221 /// and delete and/or move them.
3222 bool
3223 ObjCARCOpt::PerformCodePlacement(DenseMap<const BasicBlock *, BBState>
3224                                  &BBStates,
3225 MapVector<Value *, RRInfo> &Retains,
3226 DenseMap<Value *, RRInfo> &Releases,
3227 Module *M) {
3228 bool AnyPairsCompletelyEliminated = false;
3229 RRInfo RetainsToMove;
3230 RRInfo ReleasesToMove;
3231 SmallVector<Instruction *, 4> NewRetains;
3232 SmallVector<Instruction *, 4> NewReleases;
3233 SmallVector<Instruction *, 8> DeadInsts;
3235 // Visit each retain.
3236 for (MapVector<Value *, RRInfo>::const_iterator I = Retains.begin(),
3237 E = Retains.end(); I != E; ++I) {
3238 Value *V = I->first;
3239 if (!V) continue; // blotted
3241 Instruction *Retain = cast<Instruction>(V);
3242 Value *Arg = GetObjCArg(Retain);
3244 // If the object being released is in static or stack storage, we know it's
3245 // not being managed by ObjC reference counting, so we can delete pairs
3246 // regardless of what possible decrements or uses lie between them.
3247 bool KnownSafe = isa<Constant>(Arg) || isa<AllocaInst>(Arg);
3249 // A constant pointer can't be pointing to an object on the heap. It may
3250 // be reference-counted, but it won't be deleted.
3251 if (const LoadInst *LI = dyn_cast<LoadInst>(Arg))
3252 if (const GlobalVariable *GV =
3253 dyn_cast<GlobalVariable>(
3254 StripPointerCastsAndObjCCalls(LI->getPointerOperand())))
3255 if (GV->isConstant())
3258 // If a pair happens in a region where it is known that the reference count
3259 // is already incremented, we can similarly ignore possible decrements.
3260 bool KnownSafeTD = true, KnownSafeBU = true;
3262 // Connect the dots between the top-down-collected RetainsToMove and
3263 // bottom-up-collected ReleasesToMove to form sets of related calls.
3264 // This is an iterative process so that we connect multiple releases
3265 // to multiple retains if needed.
3266 unsigned OldDelta = 0;
3267 unsigned NewDelta = 0;
3268 unsigned OldCount = 0;
3269 unsigned NewCount = 0;
3270 bool FirstRelease = true;
3271 bool FirstRetain = true;
3272 NewRetains.push_back(Retain);
3274 for (SmallVectorImpl<Instruction *>::const_iterator
3275 NI = NewRetains.begin(), NE = NewRetains.end(); NI != NE; ++NI) {
3276 Instruction *NewRetain = *NI;
3277 MapVector<Value *, RRInfo>::const_iterator It = Retains.find(NewRetain);
3278 assert(It != Retains.end());
3279 const RRInfo &NewRetainRRI = It->second;
3280 KnownSafeTD &= NewRetainRRI.KnownSafe;
3281 for (SmallPtrSet<Instruction *, 2>::const_iterator
3282 LI = NewRetainRRI.Calls.begin(),
3283 LE = NewRetainRRI.Calls.end(); LI != LE; ++LI) {
3284 Instruction *NewRetainRelease = *LI;
3285 DenseMap<Value *, RRInfo>::const_iterator Jt =
3286 Releases.find(NewRetainRelease);
3287 if (Jt == Releases.end())
3289 const RRInfo &NewRetainReleaseRRI = Jt->second;
3290 assert(NewRetainReleaseRRI.Calls.count(NewRetain));
3291 if (ReleasesToMove.Calls.insert(NewRetainRelease)) {
3292 OldDelta -=
3293   BBStates[NewRetainRelease->getParent()].GetAllPathCount();
3295 // Merge the ReleaseMetadata and IsTailCallRelease values.
3297 ReleasesToMove.ReleaseMetadata =
3298 NewRetainReleaseRRI.ReleaseMetadata;
3299 ReleasesToMove.IsTailCallRelease =
3300 NewRetainReleaseRRI.IsTailCallRelease;
3301 FirstRelease = false;
3303 if (ReleasesToMove.ReleaseMetadata !=
3304 NewRetainReleaseRRI.ReleaseMetadata)
3305 ReleasesToMove.ReleaseMetadata = 0;
3306 if (ReleasesToMove.IsTailCallRelease !=
3307 NewRetainReleaseRRI.IsTailCallRelease)
3308 ReleasesToMove.IsTailCallRelease = false;
3311 // Collect the optimal insertion points.
3313 for (SmallPtrSet<Instruction *, 2>::const_iterator
3314 RI = NewRetainReleaseRRI.ReverseInsertPts.begin(),
3315 RE = NewRetainReleaseRRI.ReverseInsertPts.end();
3317 Instruction *RIP = *RI;
3318 if (ReleasesToMove.ReverseInsertPts.insert(RIP))
3319 NewDelta -= BBStates[RIP->getParent()].GetAllPathCount();
3321 NewReleases.push_back(NewRetainRelease);
3326 if (NewReleases.empty()) break;
3328 // Back the other way.
3329 for (SmallVectorImpl<Instruction *>::const_iterator
3330 NI = NewReleases.begin(), NE = NewReleases.end(); NI != NE; ++NI) {
3331 Instruction *NewRelease = *NI;
3332 DenseMap<Value *, RRInfo>::const_iterator It =
3333 Releases.find(NewRelease);
3334 assert(It != Releases.end());
3335 const RRInfo &NewReleaseRRI = It->second;
3336 KnownSafeBU &= NewReleaseRRI.KnownSafe;
3337 for (SmallPtrSet<Instruction *, 2>::const_iterator
3338 LI = NewReleaseRRI.Calls.begin(),
3339 LE = NewReleaseRRI.Calls.end(); LI != LE; ++LI) {
3340 Instruction *NewReleaseRetain = *LI;
3341 MapVector<Value *, RRInfo>::const_iterator Jt =
3342 Retains.find(NewReleaseRetain);
3343 if (Jt == Retains.end())
3345 const RRInfo &NewReleaseRetainRRI = Jt->second;
3346 assert(NewReleaseRetainRRI.Calls.count(NewRelease));
3347 if (RetainsToMove.Calls.insert(NewReleaseRetain)) {
3348 unsigned PathCount =
3349 BBStates[NewReleaseRetain->getParent()].GetAllPathCount();
3350 OldDelta += PathCount;
3351 OldCount += PathCount;
3353 // Merge the IsRetainBlock values.
3355 RetainsToMove.IsRetainBlock = NewReleaseRetainRRI.IsRetainBlock;
3356 FirstRetain = false;
3357 } else if (RetainsToMove.IsRetainBlock !=
3358 NewReleaseRetainRRI.IsRetainBlock)
3359 // It's not possible to merge the sequences if one uses
3360 // objc_retain and the other uses objc_retainBlock.
3363 // Collect the optimal insertion points.
3365 for (SmallPtrSet<Instruction *, 2>::const_iterator
3366 RI = NewReleaseRetainRRI.ReverseInsertPts.begin(),
3367 RE = NewReleaseRetainRRI.ReverseInsertPts.end();
3369 Instruction *RIP = *RI;
3370 if (RetainsToMove.ReverseInsertPts.insert(RIP)) {
3371 PathCount = BBStates[RIP->getParent()].GetAllPathCount();
3372 NewDelta += PathCount;
3373 NewCount += PathCount;
3376 NewRetains.push_back(NewReleaseRetain);
3380 NewReleases.clear();
3381 if (NewRetains.empty()) break;
3384 // If the pointer is known incremented or nested, we can safely delete the
3385 // pair regardless of what's between them.
3386 if (KnownSafeTD || KnownSafeBU) {
3387 RetainsToMove.ReverseInsertPts.clear();
3388 ReleasesToMove.ReverseInsertPts.clear();
3391 // Determine whether the new insertion points we computed preserve the
3392 // balance of retain and release calls through the program.
3393 // TODO: If the fully aggressive solution isn't valid, try to find a
3394 // less aggressive solution which is.
3399 // Determine whether the original call points are balanced in the retain and
3400 // release calls through the program. If not, conservatively don't touch them.
3402 // TODO: It's theoretically possible to do code motion in this case, as
3403 // long as the existing imbalances are maintained.
3407 // Ok, everything checks out and we're all set. Let's move some code!
3409 assert(OldCount != 0 && "Unreachable code?");
3410 AnyPairsCompletelyEliminated = NewCount == 0;
3411 NumRRs += OldCount - NewCount;
3412 MoveCalls(Arg, RetainsToMove, ReleasesToMove,
3413 Retains, Releases, DeadInsts, M);
3416 NewReleases.clear();
3418 RetainsToMove.clear();
3419 ReleasesToMove.clear();
3422 // Now that we're done moving everything, we can delete the newly dead
3423 // instructions, as we no longer need them as insert points.
3424 while (!DeadInsts.empty())
3425 EraseInstruction(DeadInsts.pop_back_val());
3427 return AnyPairsCompletelyEliminated;
3430 /// OptimizeWeakCalls - Weak pointer optimizations.
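///
/// A sketch of the store-to-load forwarding case (illustrative IR):
///
///    %0 = call i8* @objc_storeWeak(i8** %p, i8* %x)
///    %1 = call i8* @objc_loadWeak(i8** %p)
///
/// When the two addresses must-alias and nothing between the calls can touch
/// the weak reference, uses of %1 are replaced with %x and the load erased.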
3431 void ObjCARCOpt::OptimizeWeakCalls(Function &F) {
3432 // First, do memdep-style RLE (redundant load elimination) and S2L (store-to-
3433 // load forwarding) optimizations. We can't use memdep itself because it uses
3434 // AliasAnalysis and we need to do provenance queries instead.
3435 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
3436 Instruction *Inst = &*I++;
3438 DEBUG(dbgs() << "ObjCARCOpt::OptimizeWeakCalls: Visiting: " << *Inst <<
3441 InstructionClass Class = GetBasicInstructionClass(Inst);
3442 if (Class != IC_LoadWeak && Class != IC_LoadWeakRetained)
3445 // Delete objc_loadWeak calls with no users.
3446 if (Class == IC_LoadWeak && Inst->use_empty()) {
3447 Inst->eraseFromParent();
3451 // TODO: For now, just look for an earlier available version of this value
3452 // within the same block. Theoretically, we could do memdep-style non-local
3453 // analysis too, but that would want caching. A better approach would be to
3454 // use the technique that EarlyCSE uses.
3455 inst_iterator Current = llvm::prior(I);
3456 BasicBlock *CurrentBB = Current.getBasicBlockIterator();
3457 for (BasicBlock::iterator B = CurrentBB->begin(),
3458 J = Current.getInstructionIterator();
3460 Instruction *EarlierInst = &*llvm::prior(J);
3461 InstructionClass EarlierClass = GetInstructionClass(EarlierInst);
3462 switch (EarlierClass) {
3464 case IC_LoadWeakRetained: {
3465 // If this is loading from the same pointer, replace this load's value
3466 // with that one.
3467 CallInst *Call = cast<CallInst>(Inst);
3468 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
3469 Value *Arg = Call->getArgOperand(0);
3470 Value *EarlierArg = EarlierCall->getArgOperand(0);
3471 switch (PA.getAA()->alias(Arg, EarlierArg)) {
3472 case AliasAnalysis::MustAlias:
3474 // If the load has a builtin retain, insert a plain retain for it.
3475 if (Class == IC_LoadWeakRetained) {
3477 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
3481 // Zap the fully redundant load.
3482 Call->replaceAllUsesWith(EarlierCall);
3483 Call->eraseFromParent();
3485 case AliasAnalysis::MayAlias:
3486 case AliasAnalysis::PartialAlias:
3488 case AliasAnalysis::NoAlias:
3495 // If this is storing to the same pointer and has the same size etc.,
3496 // replace this load's value with the stored value.
3497 CallInst *Call = cast<CallInst>(Inst);
3498 CallInst *EarlierCall = cast<CallInst>(EarlierInst);
3499 Value *Arg = Call->getArgOperand(0);
3500 Value *EarlierArg = EarlierCall->getArgOperand(0);
3501 switch (PA.getAA()->alias(Arg, EarlierArg)) {
3502 case AliasAnalysis::MustAlias:
3504 // If the load has a builtin retain, insert a plain retain for it.
3505 if (Class == IC_LoadWeakRetained) {
3507 CallInst::Create(getRetainCallee(F.getParent()), EarlierCall,
3511 // Zap the fully redundant load.
3512 Call->replaceAllUsesWith(EarlierCall->getArgOperand(1));
3513 Call->eraseFromParent();
3515 case AliasAnalysis::MayAlias:
3516 case AliasAnalysis::PartialAlias:
3518 case AliasAnalysis::NoAlias:
3525 // TODO: Grab the copied value.
3527 case IC_AutoreleasepoolPush:
3530 // Weak pointers are only modified through the weak entry points
3531 // (and arbitrary calls, which could call the weak entry points).
3534 // Anything else could modify the weak pointer.
3541 // Then, for each destroyWeak with an alloca operand, check to see if
3542 // the alloca and all its users can be zapped.
3543 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
3544 Instruction *Inst = &*I++;
3545 InstructionClass Class = GetBasicInstructionClass(Inst);
3546 if (Class != IC_DestroyWeak)
3549 CallInst *Call = cast<CallInst>(Inst);
3550 Value *Arg = Call->getArgOperand(0);
3551 if (AllocaInst *Alloca = dyn_cast<AllocaInst>(Arg)) {
3552 for (Value::use_iterator UI = Alloca->use_begin(),
3553 UE = Alloca->use_end(); UI != UE; ++UI) {
3554 const Instruction *UserInst = cast<Instruction>(*UI);
3555 switch (GetBasicInstructionClass(UserInst)) {
3558 case IC_DestroyWeak:
3565 for (Value::use_iterator UI = Alloca->use_begin(),
3566 UE = Alloca->use_end(); UI != UE; ) {
3567 CallInst *UserInst = cast<CallInst>(*UI++);
3568 switch (GetBasicInstructionClass(UserInst)) {
3571 // These functions return their second argument.
3572 UserInst->replaceAllUsesWith(UserInst->getArgOperand(1));
3574 case IC_DestroyWeak:
3578 llvm_unreachable("alloca really is used!");
3580 UserInst->eraseFromParent();
3582 Alloca->eraseFromParent();
3587 DEBUG(dbgs() << "ObjCARCOpt::OptimizeWeakCalls: Finished List.\n\n");
3591 /// OptimizeSequences - Identify program paths which execute sequences of
3592 /// retains and releases which can be eliminated.
3593 bool ObjCARCOpt::OptimizeSequences(Function &F) {
3594 /// Releases, Retains - These are used to store the results of the main flow
3595 /// analysis. These use Value* as the key instead of Instruction* so that the
3596 /// map stays valid when we get around to rewriting code and calls get
3597 /// replaced by arguments.
3598 DenseMap<Value *, RRInfo> Releases;
3599 MapVector<Value *, RRInfo> Retains;
3601 /// BBStates - This is used during the traversal of the function to track the
3602 /// states for each identified object at each block.
3603 DenseMap<const BasicBlock *, BBState> BBStates;
3605 // Analyze the CFG of the function, and all instructions.
3606 bool NestingDetected = Visit(F, BBStates, Retains, Releases);
3609 return PerformCodePlacement(BBStates, Retains, Releases, F.getParent()) &&
3610        NestingDetected;
3613 /// OptimizeReturns - Look for this pattern:
3615 /// %call = call i8* @something(...)
3616 /// %2 = call i8* @objc_retain(i8* %call)
3617 /// %3 = call i8* @objc_autorelease(i8* %2)
3620 /// And delete the retain and autorelease.
3622 /// Otherwise if it's just this:
3624 /// %3 = call i8* @objc_autorelease(i8* %2)
3627 /// convert the autorelease to autoreleaseRV.
3628 void ObjCARCOpt::OptimizeReturns(Function &F) {
  if (!F.getReturnType()->isPointerTy())
    return;
3632 SmallPtrSet<Instruction *, 4> DependingInstructions;
3633 SmallPtrSet<const BasicBlock *, 4> Visited;
3634 for (Function::iterator FI = F.begin(), FE = F.end(); FI != FE; ++FI) {
3635 BasicBlock *BB = FI;
    ReturnInst *Ret = dyn_cast<ReturnInst>(&BB->back());
    if (!Ret)
      continue;

    DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Visiting: " << *Ret << "\n");

    const Value *Arg = StripPointerCastsAndObjCCalls(Ret->getOperand(0));
3643 FindDependencies(NeedsPositiveRetainCount, Arg,
3644 BB, Ret, DependingInstructions, Visited, PA);
    if (DependingInstructions.size() != 1)
      goto next_block;

    {
      CallInst *Autorelease =
        dyn_cast_or_null<CallInst>(*DependingInstructions.begin());
      if (!Autorelease)
        goto next_block;
      InstructionClass AutoreleaseClass = GetBasicInstructionClass(Autorelease);
      if (!IsAutorelease(AutoreleaseClass))
        goto next_block;
      if (GetObjCArg(Autorelease) != Arg)
        goto next_block;

      DependingInstructions.clear();
      Visited.clear();
3662 // Check that there is nothing that can affect the reference
3663 // count between the autorelease and the retain.
3664 FindDependencies(CanChangeRetainCount, Arg,
3665 BB, Autorelease, DependingInstructions, Visited, PA);
      if (DependingInstructions.size() != 1)
        goto next_block;

      {
        CallInst *Retain =
          dyn_cast_or_null<CallInst>(*DependingInstructions.begin());

        // Check that we found a retain with the same argument.
        if (!Retain ||
            !IsRetain(GetBasicInstructionClass(Retain)) ||
            GetObjCArg(Retain) != Arg)
          goto next_block;

        DependingInstructions.clear();
        Visited.clear();

        // Convert the autorelease to an autoreleaseRV, since it's
        // returning the value.
        if (AutoreleaseClass == IC_Autorelease) {
          Autorelease->setCalledFunction(getAutoreleaseRVCallee(F.getParent()));
          AutoreleaseClass = IC_AutoreleaseRV;
        }

        // Check that there is nothing that can affect the reference
        // count between the retain and the call.
        // Note that Retain need not be in BB.
        FindDependencies(CanChangeRetainCount, Arg, Retain->getParent(), Retain,
                         DependingInstructions, Visited, PA);
        if (DependingInstructions.size() != 1)
          goto next_block;

        {
          CallInst *Call =
            dyn_cast_or_null<CallInst>(*DependingInstructions.begin());

          // Check that the pointer is the return value of the call.
          if (!Call || Arg != Call)
            goto next_block;

          // Check that the call is a regular call.
          InstructionClass Class = GetBasicInstructionClass(Call);
          if (Class != IC_CallOrUser && Class != IC_Call)
            goto next_block;

          // If so, we can zap the retain and autorelease.
          Changed = true;
          ++NumRets;
          EraseInstruction(Retain);
          EraseInstruction(Autorelease);
        }
      }
    }
  next_block:
    DependingInstructions.clear();
    Visited.clear();
  }

  DEBUG(dbgs() << "ObjCARCOpt::OptimizeReturns: Finished List.\n\n");
}
bool ObjCARCOpt::doInitialization(Module &M) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;

  // Identify the imprecise release metadata kind.
3738 ImpreciseReleaseMDKind =
3739 M.getContext().getMDKindID("clang.imprecise_release");
3740 CopyOnEscapeMDKind =
3741 M.getContext().getMDKindID("clang.arc.copy_on_escape");
3742 NoObjCARCExceptionsMDKind =
3743 M.getContext().getMDKindID("clang.arc.no_objc_arc_exceptions");
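  // For example, a hedged sketch (not from the original source) of how the
  // frontend uses the first of these kinds: a release may be tagged
  //   call void @objc_release(i8* %x), !clang.imprecise_release !0
  // indicating that the release need not happen at a precise point in the
  // object's lifetime.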
  // Intuitively, objc_retain and others are nocapture; however, in practice
  // they are not, because they return their argument value. And objc_release
  // calls finalizers which can have arbitrary side effects.
  // These are initialized lazily.
  RetainRVCallee = 0;
  AutoreleaseRVCallee = 0;
  ReleaseCallee = 0;
  RetainCallee = 0;
  RetainBlockCallee = 0;
  AutoreleaseCallee = 0;

  return false;
}
bool ObjCARCOpt::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;

  PA.setAA(&getAnalysis<AliasAnalysis>());
3772 // This pass performs several distinct transformations. As a compile-time aid
3773 // when compiling code that isn't ObjC, skip these if the relevant ObjC
3774 // library functions aren't declared.
  // Preliminary optimizations. This also computes UsedInThisFunction.
3777 OptimizeIndividualCalls(F);
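  // (Sketch, for illustration, not from the original source: UsedInThisFunction
  // is a bitmask with one bit per InstructionClass, accumulated inside
  // OptimizeIndividualCalls roughly as "UsedInThisFunction |= 1 << Class;",
  // which makes the guards below cheap bit tests.)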
3779 // Optimizations for weak pointers.
3780 if (UsedInThisFunction & ((1 << IC_LoadWeak) |
3781 (1 << IC_LoadWeakRetained) |
3782 (1 << IC_StoreWeak) |
3783 (1 << IC_InitWeak) |
3784 (1 << IC_CopyWeak) |
3785 (1 << IC_MoveWeak) |
3786 (1 << IC_DestroyWeak)))
3787 OptimizeWeakCalls(F);
3789 // Optimizations for retain+release pairs.
3790 if (UsedInThisFunction & ((1 << IC_Retain) |
3791 (1 << IC_RetainRV) |
3792 (1 << IC_RetainBlock)))
3793 if (UsedInThisFunction & (1 << IC_Release))
3794 // Run OptimizeSequences until it either stops making changes or
3795 // no retain+release pair nesting is detected.
3796 while (OptimizeSequences(F)) {}
  // Optimizations if objc_autorelease is used.
  if (UsedInThisFunction & ((1 << IC_Autorelease) |
                            (1 << IC_AutoreleaseRV)))
    OptimizeReturns(F);

  return Changed;
}
void ObjCARCOpt::releaseMemory() {
  PA.clear();
}
//===----------------------------------------------------------------------===//
// ARC contraction.
//===----------------------------------------------------------------------===//
3814 // TODO: ObjCARCContract could insert PHI nodes when uses aren't
3815 // dominated by single calls.
3817 #include "llvm/Analysis/Dominators.h"
3818 #include "llvm/IR/InlineAsm.h"
3819 #include "llvm/IR/Operator.h"
STATISTIC(NumStoreStrongs, "Number of objc_storeStrong calls formed");
  /// ObjCARCContract - Late ARC optimizations. These change the IR in ways
  /// that make it difficult for ObjCARCOpt to analyze, so this pass runs late.
  class ObjCARCContract : public FunctionPass {
    AliasAnalysis *AA;
    DominatorTree *DT;
    ProvenanceAnalysis PA;
    /// Run - A flag indicating whether this optimization pass should run.
    bool Run;
3835 /// StoreStrongCallee, etc. - Declarations for ObjC runtime
3836 /// functions, for use in creating calls to them. These are initialized
3837 /// lazily to avoid cluttering up the Module with unused declarations.
3838 Constant *StoreStrongCallee,
3839 *RetainAutoreleaseCallee, *RetainAutoreleaseRVCallee;
3841 /// RetainRVMarker - The inline asm string to insert between calls and
3842 /// RetainRV calls to make the optimization work on targets which need it.
3843 const MDString *RetainRVMarker;
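    /// For example (an illustration, not from the original source): on ARM the
    /// frontend typically supplies a marker string along the lines of
    ///   "mov r7, r7 @ marker for objc_retainAutoreleaseReturnValue"
    /// via module metadata; the exact string is target- and frontend-dependent.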
3845 /// StoreStrongCalls - The set of inserted objc_storeStrong calls. If
3846 /// at the end of walking the function we have found no alloca
3847 /// instructions, these calls can be marked "tail".
3848 SmallPtrSet<CallInst *, 8> StoreStrongCalls;
3850 Constant *getStoreStrongCallee(Module *M);
3851 Constant *getRetainAutoreleaseCallee(Module *M);
3852 Constant *getRetainAutoreleaseRVCallee(Module *M);
    bool ContractAutorelease(Function &F, Instruction *Autorelease,
                             InstructionClass Class,
                             SmallPtrSet<Instruction *, 4>
                               &DependingInstructions,
                             SmallPtrSet<const BasicBlock *, 4>
                               &Visited);
3861 void ContractRelease(Instruction *Release,
3862 inst_iterator &Iter);
3864 virtual void getAnalysisUsage(AnalysisUsage &AU) const;
3865 virtual bool doInitialization(Module &M);
    virtual bool runOnFunction(Function &F);

  public:
    static char ID;
    ObjCARCContract() : FunctionPass(ID) {
      initializeObjCARCContractPass(*PassRegistry::getPassRegistry());
    }
  };
}
3876 char ObjCARCContract::ID = 0;
3877 INITIALIZE_PASS_BEGIN(ObjCARCContract,
3878 "objc-arc-contract", "ObjC ARC contraction", false, false)
3879 INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
3880 INITIALIZE_PASS_DEPENDENCY(DominatorTree)
3881 INITIALIZE_PASS_END(ObjCARCContract,
3882 "objc-arc-contract", "ObjC ARC contraction", false, false)
Pass *llvm::createObjCARCContractPass() {
  return new ObjCARCContract();
}
3888 void ObjCARCContract::getAnalysisUsage(AnalysisUsage &AU) const {
3889 AU.addRequired<AliasAnalysis>();
3890 AU.addRequired<DominatorTree>();
  AU.setPreservesCFG();
}
3894 Constant *ObjCARCContract::getStoreStrongCallee(Module *M) {
3895 if (!StoreStrongCallee) {
3896 LLVMContext &C = M->getContext();
3897 Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
3898 Type *I8XX = PointerType::getUnqual(I8X);
3899 Type *Params[] = { I8XX, I8X };
3901 AttributeSet Attribute = AttributeSet()
3902 .addAttr(M->getContext(), AttributeSet::FunctionIndex,
3903 Attribute::get(C, Attribute::NoUnwind))
3904 .addAttr(M->getContext(), 1, Attribute::get(C, Attribute::NoCapture));
    StoreStrongCallee =
      M->getOrInsertFunction(
        "objc_storeStrong",
        FunctionType::get(Type::getVoidTy(C), Params, /*isVarArg=*/false),
        Attribute);
  }
  return StoreStrongCallee;
}
3915 Constant *ObjCARCContract::getRetainAutoreleaseCallee(Module *M) {
3916 if (!RetainAutoreleaseCallee) {
3917 LLVMContext &C = M->getContext();
3918 Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
3919 Type *Params[] = { I8X };
3920 FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
3921 AttributeSet Attribute =
3922 AttributeSet().addAttr(M->getContext(), AttributeSet::FunctionIndex,
3923 Attribute::get(C, Attribute::NoUnwind));
3924 RetainAutoreleaseCallee =
3925 M->getOrInsertFunction("objc_retainAutorelease", FTy, Attribute);
  }
  return RetainAutoreleaseCallee;
}
3930 Constant *ObjCARCContract::getRetainAutoreleaseRVCallee(Module *M) {
3931 if (!RetainAutoreleaseRVCallee) {
3932 LLVMContext &C = M->getContext();
3933 Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
3934 Type *Params[] = { I8X };
3935 FunctionType *FTy = FunctionType::get(I8X, Params, /*isVarArg=*/false);
3936 AttributeSet Attribute =
3937 AttributeSet().addAttr(M->getContext(), AttributeSet::FunctionIndex,
3938 Attribute::get(C, Attribute::NoUnwind));
3939 RetainAutoreleaseRVCallee =
      M->getOrInsertFunction("objc_retainAutoreleaseReturnValue", FTy,
                             Attribute);
  }
  return RetainAutoreleaseRVCallee;
}
3946 /// ContractAutorelease - Merge an autorelease with a retain into a fused call.
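/// For illustration, a hedged sketch of the rewrite (not from the original
/// source):
///   %1 = call i8* @objc_retain(i8* %x)
///   %2 = call i8* @objc_autorelease(i8* %x)
/// becomes
///   %1 = call i8* @objc_retainAutorelease(i8* %x)
/// (or objc_retainAutoreleaseReturnValue, for the autoreleaseRV flavor).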
bool
ObjCARCContract::ContractAutorelease(Function &F, Instruction *Autorelease,
                                     InstructionClass Class,
                                     SmallPtrSet<Instruction *, 4>
                                       &DependingInstructions,
                                     SmallPtrSet<const BasicBlock *, 4>
                                       &Visited) {
3954 const Value *Arg = GetObjCArg(Autorelease);
  // Check that there are no instructions between the retain and the autorelease
  // (such as an objc_autoreleasePoolPop) which may change the count.
3958 CallInst *Retain = 0;
3959 if (Class == IC_AutoreleaseRV)
3960 FindDependencies(RetainAutoreleaseRVDep, Arg,
3961 Autorelease->getParent(), Autorelease,
3962 DependingInstructions, Visited, PA);
  else
    FindDependencies(RetainAutoreleaseDep, Arg,
3965 Autorelease->getParent(), Autorelease,
3966 DependingInstructions, Visited, PA);
  if (DependingInstructions.size() != 1) {
    DependingInstructions.clear();
    Visited.clear();
    return false;
  }

  Retain = dyn_cast_or_null<CallInst>(*DependingInstructions.begin());
  DependingInstructions.clear();
  Visited.clear();

  if (!Retain ||
      GetBasicInstructionClass(Retain) != IC_Retain ||
      GetObjCArg(Retain) != Arg)
    return false;

  Changed = true;
  ++NumPeeps;
3985 if (Class == IC_AutoreleaseRV)
3986 Retain->setCalledFunction(getRetainAutoreleaseRVCallee(F.getParent()));
  else
    Retain->setCalledFunction(getRetainAutoreleaseCallee(F.getParent()));

  EraseInstruction(Autorelease);
  return true;
}
3994 /// ContractRelease - Attempt to merge an objc_release with a store, load, and
3995 /// objc_retain to form an objc_storeStrong. This can be a little tricky because
3996 /// the instructions don't always appear in order, and there may be unrelated
3997 /// intervening instructions.
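/// For illustration, a hedged sketch of the pattern (not from the original
/// source):
///   %old = load i8** %ptr
///   store i8* %new, i8** %ptr
///   call void @objc_release(i8* %old)
/// together with a matching objc_retain of %new, becomes
///   call void @objc_storeStrong(i8** %ptr, i8* %new)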
3998 void ObjCARCContract::ContractRelease(Instruction *Release,
3999 inst_iterator &Iter) {
4000 LoadInst *Load = dyn_cast<LoadInst>(GetObjCArg(Release));
4001 if (!Load || !Load->isSimple()) return;
4003 // For now, require everything to be in one basic block.
4004 BasicBlock *BB = Release->getParent();
4005 if (Load->getParent() != BB) return;
4007 // Walk down to find the store and the release, which may be in either order.
  BasicBlock::iterator I = Load, End = BB->end();
  ++I;
4010 AliasAnalysis::Location Loc = AA->getLocation(Load);
4011 StoreInst *Store = 0;
4012 bool SawRelease = false;
  for (; !Store || !SawRelease; ++I) {
    // If we hit the end of the block without finding both, bail out.
    if (I == End)
      return;

    Instruction *Inst = I;
    if (Inst == Release) {
      SawRelease = true;
      continue;
    }

    InstructionClass Class = GetBasicInstructionClass(Inst);

    // Unrelated retains are harmless.
    if (IsRetain(Class))
      continue;

    if (Store) {
      // The store is the point where we're going to put the objc_storeStrong,
      // so make sure there are no uses after it.
      if (CanUse(Inst, Load, PA, Class))
        return;
    } else if (AA->getModRefInfo(Inst, Loc) & AliasAnalysis::Mod) {
      // We are moving the load down to the store, so check for anything
      // else which writes to the memory between the load and the store.
      Store = dyn_cast<StoreInst>(Inst);
      if (!Store || !Store->isSimple()) return;
      if (Store->getPointerOperand() != Loc.Ptr) return;
    }
  }
4043 Value *New = StripPointerCastsAndObjCCalls(Store->getValueOperand());
  // Walk up to find the retain.
  I = Store;
  BasicBlock::iterator Begin = BB->begin();
  while (I != Begin && GetBasicInstructionClass(I) != IC_Retain)
    --I;
4050 Instruction *Retain = I;
4051 if (GetBasicInstructionClass(Retain) != IC_Retain) return;
4052 if (GetObjCArg(Retain) != New) return;
  Changed = true;
  ++NumStoreStrongs;

  LLVMContext &C = Release->getContext();
4058 Type *I8X = PointerType::getUnqual(Type::getInt8Ty(C));
4059 Type *I8XX = PointerType::getUnqual(I8X);
4061 Value *Args[] = { Load->getPointerOperand(), New };
4062 if (Args[0]->getType() != I8XX)
4063 Args[0] = new BitCastInst(Args[0], I8XX, "", Store);
4064 if (Args[1]->getType() != I8X)
4065 Args[1] = new BitCastInst(Args[1], I8X, "", Store);
  CallInst *StoreStrong =
    CallInst::Create(getStoreStrongCallee(BB->getParent()->getParent()),
                     Args, "", Store);
4069 StoreStrong->setDoesNotThrow();
4070 StoreStrong->setDebugLoc(Store->getDebugLoc());
4072 // We can't set the tail flag yet, because we haven't yet determined
4073 // whether there are any escaping allocas. Remember this call, so that
4074 // we can set the tail flag once we know it's safe.
4075 StoreStrongCalls.insert(StoreStrong);
4077 if (&*Iter == Store) ++Iter;
4078 Store->eraseFromParent();
4079 Release->eraseFromParent();
4080 EraseInstruction(Retain);
4081 if (Load->use_empty())
    Load->eraseFromParent();
}
4085 bool ObjCARCContract::doInitialization(Module &M) {
4086 // If nothing in the Module uses ARC, don't do anything.
  Run = ModuleHasARC(M);
  if (!Run)
    return false;
4091 // These are initialized lazily.
4092 StoreStrongCallee = 0;
4093 RetainAutoreleaseCallee = 0;
4094 RetainAutoreleaseRVCallee = 0;
  // Initialize RetainRVMarker.
  RetainRVMarker = 0;
4098 if (NamedMDNode *NMD =
4099 M.getNamedMetadata("clang.arc.retainAutoreleasedReturnValueMarker"))
4100 if (NMD->getNumOperands() == 1) {
4101 const MDNode *N = NMD->getOperand(0);
4102 if (N->getNumOperands() == 1)
      if (const MDString *S = dyn_cast<MDString>(N->getOperand(0)))
        RetainRVMarker = S;
    }

  return false;
}
bool ObjCARCContract::runOnFunction(Function &F) {
  if (!EnableARCOpts)
    return false;

  // If nothing in the Module uses ARC, don't do anything.
  if (!Run)
    return false;

  Changed = false;
  AA = &getAnalysis<AliasAnalysis>();
4120 DT = &getAnalysis<DominatorTree>();
4122 PA.setAA(&getAnalysis<AliasAnalysis>());
  // Track whether it's ok to mark objc_storeStrong calls with the "tail"
  // keyword. Be conservative if the function has variadic arguments.
  // Functions which "return twice" are also unsafe for the "tail" keyword,
  // because they may call setjmp, which could need to return to an earlier
  // stack state.
4129 bool TailOkForStoreStrongs = !F.isVarArg() &&
4130 !F.callsFunctionThatReturnsTwice();
4132 // For ObjC library calls which return their argument, replace uses of the
4133 // argument with uses of the call return value, if it dominates the use. This
4134 // reduces register pressure.
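  // For illustration, a sketch (not from the original source): given
  //   %1 = call i8* @objc_retain(i8* %0)
  // a use of %0 that the call dominates can be rewritten to use %1 instead,
  // leaving only one of the two equal values live at the use.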
4135 SmallPtrSet<Instruction *, 4> DependingInstructions;
4136 SmallPtrSet<const BasicBlock *, 4> Visited;
4137 for (inst_iterator I = inst_begin(&F), E = inst_end(&F); I != E; ) {
4138 Instruction *Inst = &*I++;
4140 DEBUG(dbgs() << "ObjCARCContract: Visiting: " << *Inst << "\n");
4142 // Only these library routines return their argument. In particular,
4143 // objc_retainBlock does not necessarily return its argument.
    InstructionClass Class = GetBasicInstructionClass(Inst);
    switch (Class) {
    case IC_Retain:
    case IC_FusedRetainAutorelease:
    case IC_FusedRetainAutoreleaseRV:
      break;
    case IC_Autorelease:
    case IC_AutoreleaseRV:
      if (ContractAutorelease(F, Inst, Class, DependingInstructions, Visited))
        continue;
      break;
    case IC_RetainRV: {
      // If we're compiling for a target which needs a special inline-asm
      // marker to do the retainAutoreleasedReturnValue optimization,
      // insert it now.
      if (!RetainRVMarker)
        break;
      BasicBlock::iterator BBI = Inst;
      BasicBlock *InstParent = Inst->getParent();

      // Step up to see if the call immediately precedes the RetainRV call.
      // If it's an invoke, we have to cross a block boundary. And we have
      // to carefully dodge no-op instructions.
      do {
        if (&*BBI == InstParent->begin()) {
          BasicBlock *Pred = InstParent->getSinglePredecessor();
          if (!Pred)
            goto decline_rv_optimization;
          BBI = Pred->getTerminator();
          InstParent = Pred;
        } else
          --BBI;
      } while (isNoopInstruction(BBI));
4178 if (&*BBI == GetObjCArg(Inst)) {
4179 DEBUG(dbgs() << "ObjCARCContract: Adding inline asm marker for "
4180 "retainAutoreleasedReturnValue optimization.\n");
4183 InlineAsm::get(FunctionType::get(Type::getVoidTy(Inst->getContext()),
4184 /*isVarArg=*/false),
4185 RetainRVMarker->getString(),
4186 /*Constraints=*/"", /*hasSideEffects=*/true);
4187 CallInst::Create(IA, "", Inst);
4189 decline_rv_optimization:
4193 // objc_initWeak(p, null) => *p = null
      CallInst *CI = cast<CallInst>(Inst);
      if (isNullOrUndef(CI->getArgOperand(1))) {
        Value *Null =
          ConstantPointerNull::get(cast<PointerType>(CI->getType()));
        Changed = true;
        new StoreInst(Null, CI->getArgOperand(0), CI);

        DEBUG(dbgs() << "ObjCARCContract: Old = " << *CI << "\n"
                     << "                 New = " << *Null << "\n");

        CI->replaceAllUsesWith(Null);
        CI->eraseFromParent();
      }
      break;
    }
    case IC_Release:
      ContractRelease(Inst, I);
      continue;
    case IC_User:
4213 // Be conservative if the function has any alloca instructions.
4214 // Technically we only care about escaping alloca instructions,
4215 // but this is sufficient to handle some interesting cases.
4216 if (isa<AllocaInst>(Inst))
        TailOkForStoreStrongs = false;
      continue;
    default:
      continue;
    }
4225 // Don't use GetObjCArg because we don't want to look through bitcasts
4226 // and such; to do the replacement, the argument must have type i8*.
    const Value *Arg = cast<CallInst>(Inst)->getArgOperand(0);
    for (;;) {
      // If we're compiling bugpointed code, don't get in trouble.
      if (!isa<Instruction>(Arg) && !isa<Argument>(Arg))
        break;
4232 // Look through the uses of the pointer.
      for (Value::const_use_iterator UI = Arg->use_begin(), UE = Arg->use_end();
           UI != UE; ) {
4235 Use &U = UI.getUse();
4236 unsigned OperandNo = UI.getOperandNo();
4237 ++UI; // Increment UI now, because we may unlink its element.
4239 // If the call's return value dominates a use of the call's argument
4240 // value, rewrite the use to use the return value. We check for
4241 // reachability here because an unreachable call is considered to
4242 // trivially dominate itself, which would lead us to rewriting its
4243 // argument in terms of its return value, which would lead to
4244 // infinite loops in GetObjCArg.
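      // (Sketch, for illustration: such a rewrite would produce something like
      //  "%x = call i8* @objc_retain(i8* %x)", and GetObjCArg would then chase
      //  that argument in a cycle forever.)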
4245 if (DT->isReachableFromEntry(U) && DT->dominates(Inst, U)) {
          Changed = true;
          Instruction *Replacement = Inst;
4248 Type *UseTy = U.get()->getType();
4249 if (PHINode *PHI = dyn_cast<PHINode>(U.getUser())) {
4250 // For PHI nodes, insert the bitcast in the predecessor block.
4251 unsigned ValNo = PHINode::getIncomingValueNumForOperand(OperandNo);
4252 BasicBlock *BB = PHI->getIncomingBlock(ValNo);
4253 if (Replacement->getType() != UseTy)
              Replacement = new BitCastInst(Replacement, UseTy, "",
                                            &BB->back());
4256 // While we're here, rewrite all edges for this PHI, rather
4257 // than just one use at a time, to minimize the number of
4258 // bitcasts we emit.
4259 for (unsigned i = 0, e = PHI->getNumIncomingValues(); i != e; ++i)
4260 if (PHI->getIncomingBlock(i) == BB) {
4261 // Keep the UI iterator valid.
4262 if (&PHI->getOperandUse(
                      PHINode::getOperandNumForIncomingValue(i)) ==
                    &UI.getUse())
                  ++UI;
                PHI->setIncomingValue(i, Replacement);
              }
          } else {
4269 if (Replacement->getType() != UseTy)
4270 Replacement = new BitCastInst(Replacement, UseTy, "",
                                            cast<Instruction>(U.getUser()));
            U.set(Replacement);
          }
        }
      }
4277 // If Arg is a no-op casted pointer, strip one level of casts and iterate.
4278 if (const BitCastInst *BI = dyn_cast<BitCastInst>(Arg))
4279 Arg = BI->getOperand(0);
4280 else if (isa<GEPOperator>(Arg) &&
4281 cast<GEPOperator>(Arg)->hasAllZeroIndices())
4282 Arg = cast<GEPOperator>(Arg)->getPointerOperand();
      else if (isa<GlobalAlias>(Arg) &&
               !cast<GlobalAlias>(Arg)->mayBeOverridden())
        Arg = cast<GlobalAlias>(Arg)->getAliasee();
      else
        break;
    }
  }

  DEBUG(dbgs() << "ObjCARCContract: Finished List.\n\n");
4291 // If this function has no escaping allocas or suspicious vararg usage,
4292 // objc_storeStrong calls can be marked with the "tail" keyword.
4293 if (TailOkForStoreStrongs)
4294 for (SmallPtrSet<CallInst *, 8>::iterator I = StoreStrongCalls.begin(),
4295 E = StoreStrongCalls.end(); I != E; ++I)
4296 (*I)->setTailCall();
  StoreStrongCalls.clear();

  return Changed;
}