//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/CommandLine.h"
#include <algorithm>
using namespace llvm;

static cl::opt<bool>
EnableNoAliasConversion("enable-noalias-to-md-conversion", cl::init(true),
  cl::Hidden,
  cl::desc("Convert noalias attributes to metadata during inlining."));

static cl::opt<bool>
PreserveAlignmentAssumptions("preserve-alignment-assumptions-during-inlining",
  cl::init(true), cl::Hidden,
  cl::desc("Convert align attributes to assumptions during inlining."));

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, CalleeAAR, InsertLifetime);
}

bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, CalleeAAR, InsertLifetime);
}

namespace {
  /// A class for recording information about inlining a landing pad.
  class LandingPadInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    LandingPadInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(nullptr),
        CallerLPad(nullptr), InnerEHValuesPHI(nullptr) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }

    /// The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// Forward the 'resume' instruction to the caller's landing pad block.
    /// When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSetImpl<LandingPadInst*> &InlinedLPads);

    /// Add incoming-PHI values to the unwind destination block for the given
    /// basic block, using the values for the original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
} // anonymous namespace

/// Get or create a target for the branch from ResumeInsts.
BasicBlock *LandingPadInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = ++CallerLPad->getIterator();
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;
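  // (The capacity is only a reservation hint; the PHIs grow automatically as
  // additional resume edges are forwarded here.)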

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  Instruction *InsertPoint = &InnerResumeDest->front();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  // All done.
  return InnerResumeDest;
}

/// Forward the 'resume' instruction to the caller's landing pad block.
/// When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void LandingPadInliningInfo::forwardResume(
    ResumeInst *RI, SmallPtrSetImpl<LandingPadInst *> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // makes this work.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}

/// When we inline a basic block into an invoke,
/// we have to turn all of the calls that can throw into invokes.
/// This function analyzes BB to see if there are any calls, and if so,
/// it rewrites them to be invokes that jump to UnwindEdge.
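/// Returns the block containing the converted call (which now ends in an
/// invoke of the original callee) so that the caller can add PHI entries for
/// its new unwind edge, or null if no calls needed rewriting.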
static BasicBlock *
HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB, BasicBlock *UnwindEdge) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = &*BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split =
        BB->splitBasicBlock(CI->getIterator(), CI->getName() + ".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    SmallVector<OperandBundleDef, 1> OpBundles;

    // Copy the OperandBundleUse instances to OperandBundleDefs. These two are
    // *different* representations of operand bundles: see the documentation in
    // InstrTypes.h for more details.
    for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
      OpBundles.emplace_back(CS.getOperandBundleAt(i));

    // Note: we're round tripping operand bundles through memory here, and that
    // can potentially be avoided with a cleverer API design that we do not have
    // as of this writing.

    InvokeInst *II =
        InvokeInst::Create(CI->getCalledValue(), Split, UnwindEdge, InvokeArgs,
                           OpBundles, CI->getName(), BB);
    II->setDebugLoc(CI->getDebugLoc());
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();
    return BB;
  }
  return nullptr;
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedLandingPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                                    ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  LandingPadInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock->getIterator(), E = Caller->end();
       I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (LandingPadInst *InlinedLPad : InlinedLPads) {
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    if (InlinedCodeInfo.ContainsCalls)
      if (BasicBlock *NewBB = HandleCallsInBlockInlinedThroughInvoke(
              &*BB, Invoke.getOuterResumeDest()))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        Invoke.addIncomingPHIValuesFor(NewBB);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
static void HandleInlinedEHPad(InvokeInst *II, BasicBlock *FirstNewBlock,
                               ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *UnwindDest = II->getUnwindDest();
  Function *Caller = FirstNewBlock->getParent();

  assert(UnwindDest->getFirstNonPHI()->isEHPad() && "unexpected BasicBlock!");

  // If there are PHI nodes in the unwind destination block, we need to keep
  // track of which values came into them from the invoke before removing the
  // edge from this block.
  SmallVector<Value *, 8> UnwindDestPHIValues;
  llvm::BasicBlock *InvokeBB = II->getParent();
  for (Instruction &I : *UnwindDest) {
    // Save the value to use for this edge.
    PHINode *PHI = dyn_cast<PHINode>(&I);
    if (!PHI)
      break;
    UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
  }

  // Add incoming-PHI values to the unwind destination block for the given basic
  // block, using the values for the original invoke's source block.
  auto UpdatePHINodes = [&](BasicBlock *Src) {
    BasicBlock::iterator I = UnwindDest->begin();
    for (Value *V : UnwindDestPHIValues) {
      PHINode *PHI = cast<PHINode>(I);
      PHI->addIncoming(V, Src);
      ++I;
    }
  };

  // Forward EH terminator instructions to the caller's invoke destination.
  // This is as simple as connecting all the instructions which 'unwind to
  // caller' to the invoke destination.
  for (Function::iterator BB = FirstNewBlock->getIterator(), E = Caller->end();
       BB != E; ++BB) {
    Instruction *I = BB->getFirstNonPHI();
    if (I->isEHPad()) {
      if (auto *CEPI = dyn_cast<CatchEndPadInst>(I)) {
        if (CEPI->unwindsToCaller()) {
          CatchEndPadInst::Create(CEPI->getContext(), UnwindDest, CEPI);
          CEPI->eraseFromParent();
          UpdatePHINodes(&*BB);
        }
      } else if (auto *CEPI = dyn_cast<CleanupEndPadInst>(I)) {
        if (CEPI->unwindsToCaller()) {
          CleanupEndPadInst::Create(CEPI->getCleanupPad(), UnwindDest, CEPI);
          CEPI->eraseFromParent();
          UpdatePHINodes(&*BB);
        }
      } else if (auto *TPI = dyn_cast<TerminatePadInst>(I)) {
        if (TPI->unwindsToCaller()) {
          SmallVector<Value *, 3> TerminatePadArgs;
          for (Value *ArgOperand : TPI->arg_operands())
            TerminatePadArgs.push_back(ArgOperand);
          TerminatePadInst::Create(TPI->getContext(), UnwindDest,
                                   TerminatePadArgs, TPI);
          TPI->eraseFromParent();
          UpdatePHINodes(&*BB);
        }
      } else {
        assert(isa<CatchPadInst>(I) || isa<CleanupPadInst>(I));
      }
    }

    if (auto *CRI = dyn_cast<CleanupReturnInst>(BB->getTerminator())) {
      if (CRI->unwindsToCaller()) {
        CleanupReturnInst::Create(CRI->getCleanupPad(), UnwindDest, CRI);
        CRI->eraseFromParent();
        UpdatePHINodes(&*BB);
      }
    }
  }

  if (InlinedCodeInfo.ContainsCalls)
    for (Function::iterator BB = FirstNewBlock->getIterator(),
                            E = Caller->end();
         BB != E; ++BB)
      if (BasicBlock *NewBB =
              HandleCallsInBlockInlinedThroughInvoke(&*BB, UnwindDest))
        // Update any PHI nodes in the exceptional block to indicate that there
        // is now a new entry in them.
        UpdatePHINodes(NewBB);

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) now.
  UnwindDest->removePredecessor(InvokeBB);
}

/// When inlining a function that contains noalias scope metadata,
/// this metadata needs to be cloned so that the inlined blocks
/// have different "unique scopes" at every call site. Were this not done, then
/// aliasing scopes from a function inlined into a caller multiple times could
/// not be differentiated (and this would lead to miscompiles because the
/// non-aliasing property communicated by the metadata could have
/// call-site-specific control dependencies).
static void CloneAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap) {
  const Function *CalledFunc = CS.getCalledFunction();
  SetVector<const MDNode *> MD;

  // Note: We could only clone the metadata if it is already used in the
  // caller. I'm omitting that check here because it might confuse
  // inter-procedural alias analysis passes. We can revisit this if it becomes
  // an efficiency or overhead problem.

  for (Function::const_iterator I = CalledFunc->begin(), IE = CalledFunc->end();
       I != IE; ++I)
    for (BasicBlock::const_iterator J = I->begin(), JE = I->end(); J != JE; ++J) {
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_alias_scope))
        MD.insert(M);
      if (const MDNode *M = J->getMetadata(LLVMContext::MD_noalias))
        MD.insert(M);
    }

  if (MD.empty())
    return;

  // Walk the existing metadata, adding the complete (perhaps cyclic) chain to
  // the set.
  SmallVector<const Metadata *, 16> Queue(MD.begin(), MD.end());
  while (!Queue.empty()) {
    const MDNode *M = cast<MDNode>(Queue.pop_back_val());
    for (unsigned i = 0, ie = M->getNumOperands(); i != ie; ++i)
      if (const MDNode *M1 = dyn_cast<MDNode>(M->getOperand(i)))
        if (MD.insert(M1))
          Queue.push_back(M1);
  }

  // Now we have a complete set of all metadata in the chains used to specify
  // the noalias scopes and the lists of those scopes.
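  // Temporary nodes stand in for the eventual replacements so that (possibly
  // cyclic) references can be wired up before the real, uniqued nodes exist.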
  SmallVector<TempMDTuple, 16> DummyNodes;
  DenseMap<const MDNode *, TrackingMDNodeRef> MDMap;
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    DummyNodes.push_back(MDTuple::getTemporary(CalledFunc->getContext(), None));
    MDMap[*I].reset(DummyNodes.back().get());
  }

  // Create new metadata nodes to replace the dummy nodes, replacing old
  // metadata references with either a dummy node or an already-created new
  // node.
  for (SetVector<const MDNode *>::iterator I = MD.begin(), IE = MD.end();
       I != IE; ++I) {
    SmallVector<Metadata *, 4> NewOps;
    for (unsigned i = 0, ie = (*I)->getNumOperands(); i != ie; ++i) {
      const Metadata *V = (*I)->getOperand(i);
      if (const MDNode *M = dyn_cast<MDNode>(V))
        NewOps.push_back(MDMap[M]);
      else
        NewOps.push_back(const_cast<Metadata *>(V));
    }

    MDNode *NewM = MDNode::get(CalledFunc->getContext(), NewOps);
    MDTuple *TempM = cast<MDTuple>(MDMap[*I]);
    assert(TempM->isTemporary() && "Expected temporary node");

    TempM->replaceAllUsesWith(NewM);
  }

  // Now replace the metadata in the new inlined instructions with the
  // replacements from the map.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (!VMI->second)
      continue;

    Instruction *NI = dyn_cast<Instruction>(VMI->second);
    if (!NI)
      continue;

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_alias_scope)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had alias scope metadata (a list of scopes to
      // which instructions inside it might belong), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_alias_scope, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M =
              CS.getInstruction()->getMetadata(LLVMContext::MD_alias_scope))
        NI->setMetadata(LLVMContext::MD_alias_scope, M);
    }

    if (MDNode *M = NI->getMetadata(LLVMContext::MD_noalias)) {
      MDNode *NewMD = MDMap[M];
      // If the call site also had noalias metadata (a list of scopes with
      // which instructions inside it don't alias), propagate those scopes to
      // the inlined instructions.
      if (MDNode *CSM =
              CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NewMD = MDNode::concatenate(NewMD, CSM);
      NI->setMetadata(LLVMContext::MD_noalias, NewMD);
    } else if (NI->mayReadOrWriteMemory()) {
      if (MDNode *M = CS.getInstruction()->getMetadata(LLVMContext::MD_noalias))
        NI->setMetadata(LLVMContext::MD_noalias, M);
    }
  }
}

/// If the inlined function has noalias arguments,
/// then add new alias scopes for each noalias argument, tag the mapped noalias
/// parameters with noalias metadata specifying the new scope, and tag all
/// non-derived loads, stores and memory intrinsics with the new alias scopes.
static void AddAliasScopeMetadata(CallSite CS, ValueToValueMapTy &VMap,
                                  const DataLayout &DL, AAResults *CalleeAAR) {
  if (!EnableNoAliasConversion)
    return;

  const Function *CalledFunc = CS.getCalledFunction();
  SmallVector<const Argument *, 4> NoAliasArgs;

  for (const Argument &I : CalledFunc->args()) {
    if (I.hasNoAliasAttr() && !I.hasNUses(0))
      NoAliasArgs.push_back(&I);
  }

  if (NoAliasArgs.empty())
    return;

  // To do a good job, if a noalias variable is captured, we need to know if
  // the capture point dominates the particular use we're considering.
  DominatorTree DT;
  DT.recalculate(const_cast<Function&>(*CalledFunc));

  // noalias indicates that pointer values based on the argument do not alias
  // pointer values which are not based on it. So we add a new "scope" for each
  // noalias function argument. Accesses using pointers based on that argument
  // become part of that alias scope, accesses using pointers not based on that
  // argument are tagged as noalias with that scope.
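  // For example (hypothetical IR): after inlining foo(i8* noalias %a), a store
  // through a pointer based on %a carries !alias.scope naming "foo: %a", while
  // an access not based on %a carries !noalias naming that same scope.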

  DenseMap<const Argument *, MDNode *> NewScopes;
  MDBuilder MDB(CalledFunc->getContext());

  // Create a new scope domain for this function.
  MDNode *NewDomain =
    MDB.createAnonymousAliasScopeDomain(CalledFunc->getName());
  for (unsigned i = 0, e = NoAliasArgs.size(); i != e; ++i) {
    const Argument *A = NoAliasArgs[i];

    std::string Name = CalledFunc->getName();
    if (A->hasName()) {
      Name += ": %";
      Name += A->getName();
    } else {
      Name += ": argument ";
      Name += utostr(i);
    }

    // Note: We always create a new anonymous root here. This is true regardless
    // of the linkage of the callee because the aliasing "scope" is not just a
    // property of the callee, but also all control dependencies in the caller.
    MDNode *NewScope = MDB.createAnonymousAliasScope(NewDomain, Name);
    NewScopes.insert(std::make_pair(A, NewScope));
  }

  // Iterate over all new instructions in the map; for all memory-access
  // instructions, add the alias scope metadata.
  for (ValueToValueMapTy::iterator VMI = VMap.begin(), VMIE = VMap.end();
       VMI != VMIE; ++VMI) {
    if (const Instruction *I = dyn_cast<Instruction>(VMI->first)) {
      if (!VMI->second)
        continue;

      Instruction *NI = dyn_cast<Instruction>(VMI->second);
      if (!NI)
        continue;

      bool IsArgMemOnlyCall = false, IsFuncCall = false;
      SmallVector<const Value *, 2> PtrArgs;

      if (const LoadInst *LI = dyn_cast<LoadInst>(I))
        PtrArgs.push_back(LI->getPointerOperand());
      else if (const StoreInst *SI = dyn_cast<StoreInst>(I))
        PtrArgs.push_back(SI->getPointerOperand());
      else if (const VAArgInst *VAAI = dyn_cast<VAArgInst>(I))
        PtrArgs.push_back(VAAI->getPointerOperand());
      else if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I))
        PtrArgs.push_back(CXI->getPointerOperand());
      else if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I))
        PtrArgs.push_back(RMWI->getPointerOperand());
      else if (ImmutableCallSite ICS = ImmutableCallSite(I)) {
        // If we know that the call does not access memory, then we'll still
        // know that about the inlined clone of this call site, and we don't
        // need to add metadata.
        if (ICS.doesNotAccessMemory())
          continue;

        IsFuncCall = true;
        if (CalleeAAR) {
          FunctionModRefBehavior MRB = CalleeAAR->getModRefBehavior(ICS);
          if (MRB == FMRB_OnlyAccessesArgumentPointees ||
              MRB == FMRB_OnlyReadsArgumentPointees)
            IsArgMemOnlyCall = true;
        }

        for (ImmutableCallSite::arg_iterator AI = ICS.arg_begin(),
             AE = ICS.arg_end(); AI != AE; ++AI) {
          // We need to check the underlying objects of all arguments, not just
          // the pointer arguments, because we might be passing pointers as
          // integers, etc.
          // However, if we know that the call only accesses pointer arguments,
          // then we only need to check the pointer arguments.
          if (IsArgMemOnlyCall && !(*AI)->getType()->isPointerTy())
            continue;

          PtrArgs.push_back(*AI);
        }
      }

      // If we found no pointers, then this instruction is not suitable for
      // pairing with an instruction to receive aliasing metadata.
      // However, if this is a call, we might just alias with none of the
      // noalias arguments.
      if (PtrArgs.empty() && !IsFuncCall)
        continue;

      // It is possible that there is only one underlying object, but you
      // need to go through several PHIs to see it, and thus could be
      // repeated in the Objects list.
      SmallPtrSet<const Value *, 4> ObjSet;
      SmallVector<Metadata *, 4> Scopes, NoAliases;

      SmallSetVector<const Argument *, 4> NAPtrArgs;
      for (unsigned i = 0, ie = PtrArgs.size(); i != ie; ++i) {
        SmallVector<Value *, 4> Objects;
        GetUnderlyingObjects(const_cast<Value*>(PtrArgs[i]),
                             Objects, DL, /* LI = */ nullptr);

        for (Value *O : Objects)
          ObjSet.insert(O);
      }

      // Figure out if we're derived from anything that is not a noalias
      // argument.
      bool CanDeriveViaCapture = false, UsesAliasingPtr = false;
      for (const Value *V : ObjSet) {
        // Is this value a constant that cannot be derived from any pointer
        // value (we need to exclude constant expressions, for example, that
        // are formed from arithmetic on global symbols).
        bool IsNonPtrConst = isa<ConstantInt>(V) || isa<ConstantFP>(V) ||
                             isa<ConstantPointerNull>(V) ||
                             isa<ConstantDataVector>(V) || isa<UndefValue>(V);
        if (IsNonPtrConst)
          continue;

        // If this is anything other than a noalias argument, then we cannot
        // completely describe the aliasing properties using alias.scope
        // metadata (and, thus, won't add any).
        if (const Argument *A = dyn_cast<Argument>(V)) {
          if (!A->hasNoAliasAttr())
            UsesAliasingPtr = true;
        } else {
          UsesAliasingPtr = true;
        }

        // If this is not some identified function-local object (which cannot
        // directly alias a noalias argument), or some other argument (which,
        // by definition, also cannot alias a noalias argument), then we could
        // alias a noalias argument that has been captured.
        if (!isa<Argument>(V) &&
            !isIdentifiedFunctionLocal(const_cast<Value*>(V)))
          CanDeriveViaCapture = true;
      }

      // A function call can always get captured noalias pointers (via other
      // parameters, globals, etc.).
      if (IsFuncCall && !IsArgMemOnlyCall)
        CanDeriveViaCapture = true;

      // First, we want to figure out all of the sets with which we definitely
      // don't alias. Iterate over all noalias sets, and add those for which:
      //   1. The noalias argument is not in the set of objects from which we
      //      definitely derive.
      //   2. The noalias argument has not yet been captured.
      // An arbitrary function that might load pointers could see captured
      // noalias arguments via other noalias arguments or globals, and so we
      // must always check for prior capture.
      for (const Argument *A : NoAliasArgs) {
        if (!ObjSet.count(A) && (!CanDeriveViaCapture ||
                                 // It might be tempting to skip the
                                 // PointerMayBeCapturedBefore check if
                                 // A->hasNoCaptureAttr() is true, but this is
                                 // incorrect because nocapture only guarantees
                                 // that no copies outlive the function, not
                                 // that the value cannot be locally captured.
                                 !PointerMayBeCapturedBefore(A,
                                     /* ReturnCaptures */ false,
                                     /* StoreCaptures */ false, I, &DT)))
          NoAliases.push_back(NewScopes[A]);
      }

      if (!NoAliases.empty())
        NI->setMetadata(LLVMContext::MD_noalias,
                        MDNode::concatenate(
                            NI->getMetadata(LLVMContext::MD_noalias),
                            MDNode::get(CalledFunc->getContext(), NoAliases)));

      // Next, we want to figure out all of the sets to which we might belong.
      // We might belong to a set if the noalias argument is in the set of
      // underlying objects. If there is some non-noalias argument in our list
      // of underlying objects, then we cannot add a scope because the fact
      // that some access does not alias with any set of our noalias arguments
      // cannot itself guarantee that it does not alias with this access
      // (because there is some pointer of unknown origin involved and the
      // other access might also depend on this pointer). We also cannot add
      // scopes to arbitrary functions unless we know they don't access any
      // non-parameter pointer-values.
      bool CanAddScopes = !UsesAliasingPtr;
      if (CanAddScopes && IsFuncCall)
        CanAddScopes = IsArgMemOnlyCall;

      if (CanAddScopes)
        for (const Argument *A : NoAliasArgs) {
          if (ObjSet.count(A))
            Scopes.push_back(NewScopes[A]);
        }

      if (!Scopes.empty())
        NI->setMetadata(
            LLVMContext::MD_alias_scope,
            MDNode::concatenate(NI->getMetadata(LLVMContext::MD_alias_scope),
                                MDNode::get(CalledFunc->getContext(), Scopes)));
    }
  }
}

/// If the inlined function has non-byval align arguments, then
/// add @llvm.assume-based alignment assumptions to preserve this information.
static void AddAlignmentAssumptions(CallSite CS, InlineFunctionInfo &IFI) {
  if (!PreserveAlignmentAssumptions)
    return;
  auto &DL = CS.getCaller()->getParent()->getDataLayout();

  // To avoid inserting redundant assumptions, we should check for assumptions
  // already in the caller. To do this, we might need a DT of the caller.
  DominatorTree DT;
  bool DTCalculated = false;

  Function *CalledFunc = CS.getCalledFunction();
  for (Function::arg_iterator I = CalledFunc->arg_begin(),
                              E = CalledFunc->arg_end();
       I != E; ++I) {
    unsigned Align = I->getType()->isPointerTy() ? I->getParamAlignment() : 0;
    if (Align && !I->hasByValOrInAllocaAttr() && !I->hasNUses(0)) {
      if (!DTCalculated) {
        DT.recalculate(const_cast<Function&>(*CS.getInstruction()->getParent()
                                               ->getParent()));
        DTCalculated = true;
      }

      // If we can already prove the asserted alignment in the context of the
      // caller, then don't bother inserting the assumption.
      Value *Arg = CS.getArgument(I->getArgNo());
      if (getKnownAlignment(Arg, DL, CS.getInstruction(),
                            &IFI.ACT->getAssumptionCache(*CS.getCaller()),
                            &DT) >= Align)
        continue;

      IRBuilder<>(CS.getInstruction())
          .CreateAlignmentAssumption(DL, Arg, Align);
    }
  }
}

/// Once we have cloned code over from a callee into the caller,
/// update the specified callgraph to reflect the changes we made.
/// Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
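  // (Appending edges to the node we are iterating over would invalidate the
  // iterators, so walk a copy of the edge list instead.)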
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == nullptr)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (!NewCall)
      continue;

    // We do not treat intrinsic calls like real function calls because we
    // expect them to become inline code; do not add an edge for an intrinsic.
    CallSite CS = CallSite(NewCall);
    if (CS && CS.getCalledFunction() && CS.getCalledFunction()->isIntrinsic())
      continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (!I->second->getFunction())
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}

static void HandleByValArgumentInit(Value *Dst, Value *Src, Module *M,
                                    BasicBlock *InsertBlock,
                                    InlineFunctionInfo &IFI) {
  Type *AggTy = cast<PointerType>(Src->getType())->getElementType();
  IRBuilder<> Builder(InsertBlock, InsertBlock->begin());

  Value *Size = Builder.getInt64(M->getDataLayout().getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // better alignment.
  Builder.CreateMemCpy(Dst, Src, Size, /*Align=*/1);
}

/// When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
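/// Returns the value to use as the actual argument: either the original
/// pointer (when the copy can be elided) or a new, suitably aligned alloca.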
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  PointerType *ArgTy = cast<PointerType>(Arg->getType());
  Type *AggTy = ArgTy->getElementType();

  Function *Caller = TheCall->getParent()->getParent();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    const DataLayout &DL = Caller->getParent()->getDataLayout();

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment, DL, TheCall,
                                   &IFI.ACT->getAssumptionCache(*Caller)) >=
        ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align =
      Caller->getParent()->getDataLayout().getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Value *NewAlloca = new AllocaInst(AggTy, nullptr, Align, Arg->getName(),
                                    &*Caller->begin()->begin());
  IFI.StaticAllocas.push_back(cast<AllocaInst>(NewAlloca));

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}

// Check whether this Value is used by a lifetime intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
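// (Markers may be attached either to the alloca directly, when it is already
// of type i8*, or through a bitcast to i8*, since the lifetime intrinsics
// take an i8* operand.)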
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Ty = AI->getType();
  Type *Int8PtrTy = Type::getInt8PtrTy(Ty->getContext(),
                                       Ty->getPointerAddressSpace());
  if (Ty == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}

/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain now is inlined-at the new call site.
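/// For example, a location "in a(), inlined at b()" becomes "in a(), inlined
/// at b(), inlined at the new call site"; the IANodes cache lets instructions
/// that share an inlined-at suffix share the rebuilt nodes as well.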
static DebugLoc
updateInlinedAtInfo(DebugLoc DL, DILocation *InlinedAtNode, LLVMContext &Ctx,
                    DenseMap<const DILocation *, DILocation *> &IANodes) {
  SmallVector<DILocation *, 3> InlinedAtLocations;
  DILocation *Last = InlinedAtNode;
  DILocation *CurInlinedAt = DL;

  // Gather all the inlined-at nodes.
  while (DILocation *IA = CurInlinedAt->getInlinedAt()) {
    // Skip any we've already built nodes for.
    if (DILocation *Found = IANodes[IA]) {
      Last = Found;
      break;
    }

    InlinedAtLocations.push_back(IA);
    CurInlinedAt = IA;
  }

  // Starting from the top, rebuild the nodes to point to the new inlined-at
  // location (then rebuilding the rest of the chain behind it) and update the
  // map of already-constructed inlined-at nodes.
  for (const DILocation *MD : make_range(InlinedAtLocations.rbegin(),
                                         InlinedAtLocations.rend())) {
    Last = IANodes[MD] = DILocation::getDistinct(
        Ctx, MD->getLine(), MD->getColumn(), MD->getScope(), Last);
  }

  // And finally create the normal location for this instruction, referring to
  // the new inlined-at chain.
  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(), Last);
}

/// Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (!TheCallDL)
    return;

  auto &Ctx = Fn->getContext();
  DILocation *InlinedAtNode = TheCallDL;

  // Create a unique call site, not to be confused with any other call from the
  // same location.
  InlinedAtNode = DILocation::getDistinct(
      Ctx, InlinedAtNode->getLine(), InlinedAtNode->getColumn(),
      InlinedAtNode->getScope(), InlinedAtNode->getInlinedAt());

  // Cache the inlined-at nodes as they're built so they are reused; without
  // this every instruction's inlined-at chain would become distinct from each
  // other.
  DenseMap<const DILocation *, DILocation *> IANodes;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL) {
        // If the inlined instruction has no line number, make it look as if it
        // originates from the call location. This is important for
        // ((__always_inline__, __nodebug__)) functions which must use caller
        // location for all instructions in their function body.

        // Don't update static allocas, as they may get moved later.
        if (auto *AI = dyn_cast<AllocaInst>(BI))
          if (isa<Constant>(AI->getArraySize()))
            continue;

        BI->setDebugLoc(TheCallDL);
      } else {
        BI->setDebugLoc(updateInlinedAtInfo(DL, InlinedAtNode,
                                            BI->getContext(), IANodes));
      }
    }
  }
}

/// This function inlines the called function into the basic block of the
/// caller. This returns false if it is not possible to inline this call.
/// The program is still in a well defined state if this occurs though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          AAResults *CalleeAAR, bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (!CalledFunc ||                 // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // The inliner does not know how to inline through calls with operand bundles
  // in general ...
  if (CS.hasOperandBundles()) {
    // ... but it knows how to inline through "deopt" operand bundles.
    bool CanInline =
        CS.getNumOperandBundles() == 1 &&
        CS.getOperandBundleAt(0).getTagID() == LLVMContext::OB_deopt;
    if (!CanInline)
      return false;
  }

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }

  // Get the personality function from the callee if it contains a landing pad.
  Constant *CalledPersonality =
      CalledFunc->hasPersonalityFn()
          ? CalledFunc->getPersonalityFn()->stripPointerCasts()
          : nullptr;

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  Constant *CallerPersonality =
      Caller->hasPersonalityFn()
          ? Caller->getPersonalityFn()->stripPointerCasts()
          : nullptr;
  if (CalledPersonality) {
    if (!CallerPersonality)
      Caller->setPersonalityFn(CalledPersonality);
    // If the personality functions match, then we can perform the
    // inlining. Otherwise, we can't inline.
    // TODO: This isn't 100% true. Some personality functions are proper
    //       supersets of others and can be used in place of the other.
    else if (CalledPersonality != CallerPersonality)
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = --Caller->end();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;
    // Keep a list of pair (dst, src) to emit byval initializations.
    SmallVector<std::pair<Value*, Value*>, 4> ByValInit;

    auto &DL = Caller->getParent()->getDataLayout();

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));
        if (ActualArg != *AI)
          ByValInit.push_back(std::make_pair(ActualArg, (Value*) *AI));
      }

      VMap[&*I] = ActualArg;
    }

    // Add alignment assumptions if necessary. We do this before the inlined
    // instructions are actually cloned into the caller so that we can easily
    // check what will be known at the start of the inlined code.
    AddAlignmentAssumptions(CS, IFI);

    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Inject byval arguments initialization.
    for (std::pair<Value*, Value*> &Init : ByValInit)
      HandleByValArgumentInit(Init.first, Init.second, Caller->getParent(),
                              &*FirstNewBlock, IFI);

    if (CS.hasOperandBundles()) {
      auto ParentDeopt = CS.getOperandBundleAt(0);
      assert(ParentDeopt.getTagID() == LLVMContext::OB_deopt &&
             "Checked on entry!");

      SmallVector<OperandBundleDef, 2> OpDefs;

      for (auto &VH : InlinedFunctionInfo.OperandBundleCallSites) {
        Instruction *I = dyn_cast_or_null<Instruction>(VH);
        if (!I)
          continue;

        OpDefs.clear();

        CallSite ICS(I);
        OpDefs.reserve(ICS.getNumOperandBundles());

        for (unsigned i = 0, e = ICS.getNumOperandBundles(); i < e; ++i) {
          auto ChildOB = ICS.getOperandBundleAt(i);
          if (ChildOB.getTagID() != LLVMContext::OB_deopt) {
            // If the inlined call has other operand bundles, let them be.
            OpDefs.emplace_back(ChildOB);
            continue;
          }

          // It may be useful to separate this logic (of handling operand
          // bundles) out to a separate "policy" component if this gets crowded.
          // Prepend the parent's deoptimization continuation to the newly
          // inlined call's deoptimization continuation.
          std::vector<Value *> MergedDeoptArgs;
          MergedDeoptArgs.reserve(ParentDeopt.Inputs.size() +
                                  ChildOB.Inputs.size());

          MergedDeoptArgs.insert(MergedDeoptArgs.end(),
                                 ParentDeopt.Inputs.begin(),
                                 ParentDeopt.Inputs.end());
          MergedDeoptArgs.insert(MergedDeoptArgs.end(), ChildOB.Inputs.begin(),
                                 ChildOB.Inputs.end());

          OpDefs.emplace_back(StringRef("deopt"), std::move(MergedDeoptArgs));
        }

        Instruction *NewI = nullptr;
        if (isa<CallInst>(I))
          NewI = CallInst::Create(cast<CallInst>(I), OpDefs, I);
        else
          NewI = InvokeInst::Create(cast<InvokeInst>(I), OpDefs, I);

        // Note: the RAUW does the appropriate fixup in VMap, so we need to do
        // this even if the call returns void.
        I->replaceAllUsesWith(NewI);

        VH = nullptr;
        I->eraseFromParent();
      }
    }

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);

    // Clone existing noalias metadata if necessary.
    CloneAliasScopeMetadata(CS, VMap);

    // Add noalias metadata if necessary.
    AddAliasScopeMetadata(CS, VMap, DL, CalleeAAR);

    // FIXME: We could register any cloned assumptions instead of clearing the
    // whole function's cache.
    if (IFI.ACT)
      IFI.ACT->getAssumptionCache(*Caller).clear();
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (!AI) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(
          InsertPoint, FirstNewBlock->getInstList(), AI->getIterator(), I);
    }
    // Move any dbg.declares describing the allocas into the entry basic block.
    DIBuilder DIB(*Caller->getParent());
    for (auto &AI : IFI.StaticAllocas)
      replaceDbgDeclareForAlloca(AI, AI, DIB, /*Deref=*/false);
  }

  bool InlinedMustTailCalls = false;
  if (InlinedFunctionInfo.ContainsCalls) {
    CallInst::TailCallKind CallSiteTailKind = CallInst::TCK_None;
    if (CallInst *CI = dyn_cast<CallInst>(TheCall))
      CallSiteTailKind = CI->getTailCallKind();

    for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E;
         ++BB) {
      for (Instruction &I : *BB) {
        CallInst *CI = dyn_cast<CallInst>(&I);
        if (!CI)
          continue;

        // We need to reduce the strength of any inlined tail calls. For
        // musttail, we have to avoid introducing potential unbounded stack
        // growth. For example, if functions 'f' and 'g' are mutually recursive
        // with musttail, we can inline 'g' into 'f' so long as we preserve
        // musttail on the cloned call to 'f'. If either the inlined call site
        // or the cloned call site is *not* musttail, the program already has
        // one frame of stack growth, so it's safe to remove musttail. Here is
        // a table of example transformations:
        //
        //    f -> musttail g -> musttail f  ==>  f -> musttail f
        //    f -> musttail g ->     tail f  ==>  f ->     tail f
        //    f ->          g -> musttail f  ==>  f ->          f
        //    f ->          g ->     tail f  ==>  f ->          f
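        // (std::min below relies on the TailCallKind enumeration being ordered
        // weakest-to-strongest, TCK_None < TCK_Tail < TCK_MustTail, so it
        // picks the weaker of the two kinds.)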
        CallInst::TailCallKind ChildTCK = CI->getTailCallKind();
        ChildTCK = std::min(CallSiteTailKind, ChildTCK);
        CI->setTailCallKind(ChildTCK);
        InlinedMustTailCalls |= CI->isMustTailCall();

        // Calls inlined through a 'nounwind' call site should be marked
        // 'nounwind'.
        if (MarkNoUnwind)
          CI->setDoesNotThrow();
      }
    }
  }

  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(&FirstNewBlock->front());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = nullptr;
      if (ConstantInt *AIArraySize =
              dyn_cast<ConstantInt>(AI->getArraySize())) {
        auto &DL = Caller->getParent()->getDataLayout();
        Type *AllocaType = AI->getAllocatedType();
        uint64_t AllocaTypeSize = DL.getTypeAllocSize(AllocaType);
        uint64_t AllocaArraySize = AIArraySize->getLimitedValue();

        // Don't add markers for zero-sized allocas.
        if (AllocaArraySize == 0)
          continue;

        // Check that array size doesn't saturate uint64_t and doesn't
        // overflow when it's multiplied by type size.
        if (AllocaArraySize != ~0ULL &&
            UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
          AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                        AllocaArraySize * AllocaTypeSize);
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (ReturnInst *RI : Returns) {
        // Don't insert llvm.lifetime.end calls between a musttail call and a
        // return. The return kills all local allocas.
        if (InlinedMustTailCalls &&
            RI->getParent()->getTerminatingMustTailCall())
          continue;
        IRBuilder<>(RI).CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
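  // (Without the save/restore pair, dynamic allocas from the callee would keep
  // accumulating stack space in the caller's frame, e.g. when the call sits
  // inside a loop.)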
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore =
        Intrinsic::getDeclaration(M, Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(&*FirstNewBlock, FirstNewBlock->begin())
                             .CreateCall(StackSave, {}, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (ReturnInst *RI : Returns) {
      // Don't insert llvm.stackrestore calls between a musttail call and a
      // return. The return will restore the stack pointer.
      if (InlinedMustTailCalls && RI->getParent()->getTerminatingMustTailCall())
        continue;
      IRBuilder<>(RI).CreateCall(StackRestore, SavedPtr);
    }
  }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (auto *II = dyn_cast<InvokeInst>(TheCall)) {
    BasicBlock *UnwindDest = II->getUnwindDest();
    Instruction *FirstNonPHI = UnwindDest->getFirstNonPHI();
    if (isa<LandingPadInst>(FirstNonPHI)) {
      HandleInlinedLandingPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    } else {
      HandleInlinedEHPad(II, &*FirstNewBlock, InlinedFunctionInfo);
    }
  }

  // Handle any inlined musttail call sites. In order for a new call site to be
  // musttail, the source of the clone and the inlined call site must have been
  // musttail. Therefore it's safe to return without merging control into the
  // phi below.
  if (InlinedMustTailCalls) {
    // Check if we need to bitcast the result of any musttail calls.
    Type *NewRetTy = Caller->getReturnType();
    bool NeedBitCast = !TheCall->use_empty() && TheCall->getType() != NewRetTy;

    // Handle the returns preceded by musttail calls separately.
    SmallVector<ReturnInst *, 8> NormalReturns;
    for (ReturnInst *RI : Returns) {
      CallInst *ReturnedMustTail =
          RI->getParent()->getTerminatingMustTailCall();
      if (!ReturnedMustTail) {
        NormalReturns.push_back(RI);
        continue;
      }
      if (!NeedBitCast)
        continue;

      // Delete the old return and any preceding bitcast.
      BasicBlock *CurBB = RI->getParent();
      auto *OldCast = dyn_cast_or_null<BitCastInst>(RI->getReturnValue());
      RI->eraseFromParent();
      if (OldCast)
        OldCast->eraseFromParent();

      // Insert a new bitcast and return with the right type.
      IRBuilder<> Builder(CurBB);
      Builder.CreateRet(Builder.CreateBitCast(ReturnedMustTail, NewRetTy));
    }

    // Leave behind the normal returns so we can merge control flow.
    std::swap(Returns, NormalReturns);
  }

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
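  // (This is the common single-return fast path; it avoids creating the
  // ".exit" continuation block and the PHI merge used in the general case
  // below.)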
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall->getIterator(),
                                 FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = nullptr;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB =
        OrigBB->splitBasicBlock(CreatedBranchToNormalDest->getIterator(),
                                CalledFunc->getName() + ".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall->getIterator(),
                                          CalledFunc->getName() + ".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, &*FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB->getIterator(),
                                     Caller->getBasicBlockList(), FirstNewBlock,
                                     Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = nullptr;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            &AfterCallBB->front());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }

    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // function.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction now and empty ReturnBB now.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // If we inlined any musttail calls and the original return is now
  // unreachable, delete it. It can only contain a bitcast and ret.
  if (InlinedMustTailCalls && pred_begin(AfterCallBB) == pred_end(AfterCallBB))
    AfterCallBB->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br->getIterator(), CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    auto &DL = Caller->getParent()->getDataLayout();
    if (Value *V = SimplifyInstruction(PHI, DL, nullptr, nullptr,
                                       &IFI.ACT->getAssumptionCache(*Caller))) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}