//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(CI), IFI, InsertLifetime);
}

bool llvm::InlineFunction(InvokeInst *II, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  return InlineFunction(CallSite(II), IFI, InsertLifetime);
}
namespace {
  /// A class for recording information about inlining through an invoke.
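  ///
  /// OuterResumeDest is the caller's original unwind destination and is
  /// recorded up front, together with the caller's landingpad and the values
  /// feeding any PHI nodes in that block. The inner resume destination and
  /// its exception-value PHI are only created on demand, the first time an
  /// inlined 'resume' has to be forwarded (see getInnerResumeDest).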
  class InvokeInliningInfo {
    BasicBlock *OuterResumeDest; ///< Destination of the invoke's unwind.
    BasicBlock *InnerResumeDest; ///< Destination for the callee's resume.
    LandingPadInst *CallerLPad;  ///< LandingPadInst associated with the invoke.
    PHINode *InnerEHValuesPHI;   ///< PHI for EH values from landingpad insts.
    SmallVector<Value*, 8> UnwindDestPHIValues;

  public:
    InvokeInliningInfo(InvokeInst *II)
      : OuterResumeDest(II->getUnwindDest()), InnerResumeDest(0),
        CallerLPad(0), InnerEHValuesPHI(0) {
      // If there are PHI nodes in the unwind destination block, we need to keep
      // track of which values came into them from the invoke before removing
      // the edge from this block.
      llvm::BasicBlock *InvokeBB = II->getParent();
      BasicBlock::iterator I = OuterResumeDest->begin();
      for (; isa<PHINode>(I); ++I) {
        // Save the value to use for this edge.
        PHINode *PHI = cast<PHINode>(I);
        UnwindDestPHIValues.push_back(PHI->getIncomingValueForBlock(InvokeBB));
      }

      CallerLPad = cast<LandingPadInst>(I);
    }
    /// getOuterResumeDest - The outer unwind destination is the target of
    /// unwind edges introduced for calls within the inlined function.
    BasicBlock *getOuterResumeDest() const {
      return OuterResumeDest;
    }

    BasicBlock *getInnerResumeDest();

    LandingPadInst *getLandingPadInst() const { return CallerLPad; }

    /// forwardResume - Forward the 'resume' instruction to the caller's landing
    /// pad block. When the landing pad block has only one predecessor, this is
    /// a simple branch. When there is more than one predecessor, we need to
    /// split the landing pad block after the landingpad instruction and jump
    /// to there.
    void forwardResume(ResumeInst *RI,
                       SmallPtrSet<LandingPadInst*, 16> &InlinedLPads);

    /// addIncomingPHIValuesFor - Add incoming-PHI values to the unwind
    /// destination block for the given basic block, using the values for the
    /// original invoke's source block.
    void addIncomingPHIValuesFor(BasicBlock *BB) const {
      addIncomingPHIValuesForInto(BB, OuterResumeDest);
    }

    void addIncomingPHIValuesForInto(BasicBlock *src, BasicBlock *dest) const {
      BasicBlock::iterator I = dest->begin();
      for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
        PHINode *phi = cast<PHINode>(I);
        phi->addIncoming(UnwindDestPHIValues[i], src);
      }
    }
  };
}
/// getInnerResumeDest - Get or create a target for the branch from ResumeInsts.
BasicBlock *InvokeInliningInfo::getInnerResumeDest() {
  if (InnerResumeDest) return InnerResumeDest;

  // Split the landing pad.
  BasicBlock::iterator SplitPoint = CallerLPad; ++SplitPoint;
  InnerResumeDest =
    OuterResumeDest->splitBasicBlock(SplitPoint,
                                     OuterResumeDest->getName() + ".body");

  // The number of incoming edges we expect to the inner landing pad.
  const unsigned PHICapacity = 2;

  // Create corresponding new PHIs for all the PHIs in the outer landing pad.
  BasicBlock::iterator InsertPoint = InnerResumeDest->begin();
  BasicBlock::iterator I = OuterResumeDest->begin();
  for (unsigned i = 0, e = UnwindDestPHIValues.size(); i != e; ++i, ++I) {
    PHINode *OuterPHI = cast<PHINode>(I);
    PHINode *InnerPHI = PHINode::Create(OuterPHI->getType(), PHICapacity,
                                        OuterPHI->getName() + ".lpad-body",
                                        InsertPoint);
    OuterPHI->replaceAllUsesWith(InnerPHI);
    InnerPHI->addIncoming(OuterPHI, OuterResumeDest);
  }

  // Create a PHI for the exception values.
  InnerEHValuesPHI = PHINode::Create(CallerLPad->getType(), PHICapacity,
                                     "eh.lpad-body", InsertPoint);
  CallerLPad->replaceAllUsesWith(InnerEHValuesPHI);
  InnerEHValuesPHI->addIncoming(CallerLPad, OuterResumeDest);

  return InnerResumeDest;
}
/// forwardResume - Forward the 'resume' instruction to the caller's landing pad
/// block. When the landing pad block has only one predecessor, this is a simple
/// branch. When there is more than one predecessor, we need to split the
/// landing pad block after the landingpad instruction and jump to there.
void InvokeInliningInfo::forwardResume(ResumeInst *RI,
                               SmallPtrSet<LandingPadInst*, 16> &InlinedLPads) {
  BasicBlock *Dest = getInnerResumeDest();
  BasicBlock *Src = RI->getParent();

  BranchInst::Create(Dest, Src);

  // Update the PHIs in the destination. They were inserted in an order which
  // matches the order of the saved unwind values, so the two can be walked in
  // lockstep.
  addIncomingPHIValuesForInto(Src, Dest);

  InnerEHValuesPHI->addIncoming(RI->getOperand(0), Src);
  RI->eraseFromParent();
}
/// HandleCallsInBlockInlinedThroughInvoke - When we inline a basic block into
/// an invoke, we have to turn all of the calls that can throw into
/// invokes. This function analyzes BB to see if there are any calls, and if
/// so, it rewrites them to be invokes that jump to the invoke's outer resume
/// destination, and it fills in the PHI nodes of that destination with the
/// values coming from the original invoke's block.
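///
/// As an illustrative sketch (block and value names are invented for the
/// example, not taken from real output): a block containing
///     %r = call i32 @may_throw(i32 %a)
/// is split right after the call, and the call is rewritten to
///     %r = invoke i32 @may_throw(i32 %a)
///             to label %r.noexc unwind label %caller.lpad
/// so that an exception thrown inside the inlined body unwinds to the
/// caller's landing pad instead of escaping the inlined code.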
static void HandleCallsInBlockInlinedThroughInvoke(BasicBlock *BB,
                                                   InvokeInliningInfo &Invoke) {
  for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ) {
    Instruction *I = BBI++;

    // We only need to check for function calls: inlined invoke
    // instructions require no special handling.
    CallInst *CI = dyn_cast<CallInst>(I);

    // If this call cannot unwind, don't convert it to an invoke.
    // Inline asm calls cannot throw.
    if (!CI || CI->doesNotThrow() || isa<InlineAsm>(CI->getCalledValue()))
      continue;

    // Convert this function call into an invoke instruction. First, split the
    // basic block.
    BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

    // Delete the unconditional branch inserted by splitBasicBlock.
    BB->getInstList().pop_back();

    // Create the new invoke instruction.
    ImmutableCallSite CS(CI);
    SmallVector<Value*, 8> InvokeArgs(CS.arg_begin(), CS.arg_end());
    InvokeInst *II = InvokeInst::Create(CI->getCalledValue(), Split,
                                        Invoke.getOuterResumeDest(),
                                        InvokeArgs, CI->getName(), BB);
    II->setCallingConv(CI->getCallingConv());
    II->setAttributes(CI->getAttributes());

    // Make sure that anything using the call now uses the invoke! This also
    // updates the CallGraph if present, because it uses a WeakVH.
    CI->replaceAllUsesWith(II);

    // Delete the original call.
    Split->getInstList().pop_front();

    // Update any PHI nodes in the exceptional block to indicate that there is
    // now a new entry in them.
    Invoke.addIncomingPHIValuesFor(BB);
    return;
  }
}
/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes.
///
/// II is the invoke instruction being inlined. FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
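///
/// In outline, this collects the landingpads of any invokes already present
/// in the inlined code, copies the caller's landingpad clauses (and cleanup
/// flag) onto them, rewrites throwing calls in the inlined blocks into
/// invokes, forwards any 'resume' instructions to the caller's landing pad,
/// and finally removes the original invoke's edge to its unwind destination.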
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  InvokeInliningInfo Invoke(II);

  // Get all of the inlined landing pad instructions.
  SmallPtrSet<LandingPadInst*, 16> InlinedLPads;
  for (Function::iterator I = FirstNewBlock, E = Caller->end(); I != E; ++I)
    if (InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator()))
      InlinedLPads.insert(II->getLandingPadInst());

  // Append the clauses from the outer landing pad instruction into the inlined
  // landing pad instructions.
  LandingPadInst *OuterLPad = Invoke.getLandingPadInst();
  for (SmallPtrSet<LandingPadInst*, 16>::iterator I = InlinedLPads.begin(),
         E = InlinedLPads.end(); I != E; ++I) {
    LandingPadInst *InlinedLPad = *I;
    unsigned OuterNum = OuterLPad->getNumClauses();
    InlinedLPad->reserveClauses(OuterNum);
    for (unsigned OuterIdx = 0; OuterIdx != OuterNum; ++OuterIdx)
      InlinedLPad->addClause(OuterLPad->getClause(OuterIdx));
    if (OuterLPad->isCleanup())
      InlinedLPad->setCleanup(true);
  }

  for (Function::iterator BB = FirstNewBlock, E = Caller->end(); BB != E; ++BB){
    if (InlinedCodeInfo.ContainsCalls)
      HandleCallsInBlockInlinedThroughInvoke(BB, Invoke);

    // Forward any resumes that are remaining here.
    if (ResumeInst *RI = dyn_cast<ResumeInst>(BB->getTerminator()))
      Invoke.forwardResume(RI, InlinedLPads);
  }

  // Now that everything is happy, we have one final detail. The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction. Eliminate these entries (which might even delete the
  // PHI node) by removing the original invoke's block as a predecessor.
  InvokeDest->removePredecessor(II->getParent());
}
/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made. Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
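///
/// For example (hypothetical functions): if A inlines a call to B, and B in
/// turn called C, then an edge A->C is added for the cloned call site
/// (provided that call survived cloning and constant folding), and the A->B
/// edge for the now-removed call site is deleted.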
static void UpdateCallGraphAfterInlining(CallSite CS,
                                         Function::iterator FirstNewBlock,
                                         ValueToValueMapTy &VMap,
                                         InlineFunctionInfo &IFI) {
  CallGraph &CG = *IFI.CG;
  const Function *Caller = CS.getInstruction()->getParent()->getParent();
  const Function *Callee = CS.getCalledFunction();
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  CallGraphNode::iterator I = CalleeNode->begin(), E = CalleeNode->end();

  // Consider the case where CalleeNode == CallerNode.
  CallGraphNode::CalledFunctionsVector CallCache;
  if (CalleeNode == CallerNode) {
    CallCache.assign(I, E);
    I = CallCache.begin();
    E = CallCache.end();
  }

  for (; I != E; ++I) {
    const Value *OrigCall = I->first;

    ValueToValueMapTy::iterator VMI = VMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI == VMap.end() || VMI->second == 0)
      continue;

    // If the call was inlined, but then constant folded, there is no edge to
    // add. Check for this case.
    Instruction *NewCall = dyn_cast<Instruction>(VMI->second);
    if (NewCall == 0) continue;

    // Remember that this call site got inlined for the client of
    // InlineFunction.
    IFI.InlinedCalls.push_back(NewCall);

    // It's possible that inlining the callsite will cause it to go from an
    // indirect to a direct call by resolving a function pointer. If this
    // happens, set the callee of the new call site to a more precise
    // destination. This can also happen if the call graph node of the caller
    // was just unnecessarily imprecise.
    if (I->second->getFunction() == 0)
      if (Function *F = CallSite(NewCall).getCalledFunction()) {
        // Indirect call site resolved to direct call.
        CallerNode->addCalledFunction(CallSite(NewCall), CG[F]);

        continue;
      }

    CallerNode->addCalledFunction(CallSite(NewCall), I->second);
  }

  // Update the call graph by deleting the edge from Callee to Caller. We must
  // do this after the loop above in case Caller and Callee are the same.
  CallerNode->removeCallEdgeFor(CS);
}
/// HandleByValArgument - When inlining a call site that has a byval argument,
/// we have to make the implicit memcpy explicit by adding it.
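///
/// Roughly (an illustrative sketch, not literal compiler output): for a byval
/// argument %arg of type %struct.S*, the caller gains something like
///     %arg.copy = alloca %struct.S
///     call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst, i8* %src,
///                                          i64 <sizeof S>, i32 1, i1 false)
/// and the inlined body is pointed at %arg.copy instead of %arg.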
static Value *HandleByValArgument(Value *Arg, Instruction *TheCall,
                                  const Function *CalledFunc,
                                  InlineFunctionInfo &IFI,
                                  unsigned ByValAlignment) {
  Type *AggTy = cast<PointerType>(Arg->getType())->getElementType();

  // If the called function is readonly, then it could not mutate the caller's
  // copy of the byval'd memory. In this case, it is safe to elide the copy and
  // temporary.
  if (CalledFunc->onlyReadsMemory()) {
    // If the byval argument has a specified alignment that is greater than the
    // passed in pointer, then we either have to round up the input pointer or
    // give up on this transformation.
    if (ByValAlignment <= 1)  // 0 = unspecified, 1 = no particular alignment.
      return Arg;

    // If the pointer is already known to be sufficiently aligned, or if we can
    // round it up to a larger alignment, then we don't need a temporary.
    if (getOrEnforceKnownAlignment(Arg, ByValAlignment,
                                   IFI.DL) >= ByValAlignment)
      return Arg;

    // Otherwise, we have to make a memcpy to get a safe alignment. This is bad
    // for code quality, but rarely happens and is required for correctness.
  }

  LLVMContext &Context = Arg->getContext();

  Type *VoidPtrTy = Type::getInt8PtrTy(Context);

  // Create the alloca. If we have DataLayout, use nice alignment.
  unsigned Align = 1;
  if (IFI.DL)
    Align = IFI.DL->getPrefTypeAlignment(AggTy);

  // If the byval had an alignment specified, we *must* use at least that
  // alignment, as it is required by the byval argument (and uses of the
  // pointer inside the callee).
  Align = std::max(Align, ByValAlignment);

  Function *Caller = TheCall->getParent()->getParent();

  Value *NewAlloca = new AllocaInst(AggTy, 0, Align, Arg->getName(),
                                    &*Caller->begin()->begin());

  Type *Tys[3] = {VoidPtrTy, VoidPtrTy, Type::getInt64Ty(Context)};
  Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                 Intrinsic::memcpy, Tys);

  Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
  Value *SrcCast = new BitCastInst(Arg, VoidPtrTy, "tmp", TheCall);

  Value *Size;
  if (IFI.DL == 0)
    Size = ConstantExpr::getSizeOf(AggTy);
  else
    Size = ConstantInt::get(Type::getInt64Ty(Context),
                            IFI.DL->getTypeStoreSize(AggTy));

  // Always generate a memcpy of alignment 1 here because we don't know
  // the alignment of the src pointer. Other optimizations can infer
  // it later.
  Value *CallArgs[] = {
    DestCast, SrcCast, Size,
    ConstantInt::get(Type::getInt32Ty(Context), 1),
    ConstantInt::getFalse(Context) // isVolatile
  };
  IRBuilder<>(TheCall).CreateCall(MemCpyFn, CallArgs);

  // Uses of the argument in the function should use our new alloca
  // instead.
  return NewAlloca;
}
// isUsedByLifetimeMarker - Check whether this Value is used by a lifetime
// intrinsic.
static bool isUsedByLifetimeMarker(Value *V) {
  for (User *U : V->users()) {
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(U)) {
      switch (II->getIntrinsicID()) {
      default: break;
      case Intrinsic::lifetime_start:
      case Intrinsic::lifetime_end:
        return true;
      }
    }
  }
  return false;
}

// hasLifetimeMarkers - Check whether the given alloca already has
// lifetime.start or lifetime.end intrinsics.
static bool hasLifetimeMarkers(AllocaInst *AI) {
  Type *Int8PtrTy = Type::getInt8PtrTy(AI->getType()->getContext());
  if (AI->getType() == Int8PtrTy)
    return isUsedByLifetimeMarker(AI);

  // Do a scan to find all the casts to i8*.
  for (User *U : AI->users()) {
    if (U->getType() != Int8PtrTy) continue;
    if (U->stripPointerCasts() != AI) continue;
    if (isUsedByLifetimeMarker(U))
      return true;
  }
  return false;
}
/// updateInlinedAtInfo - Helper function used by fixupLineNumbers to
/// recursively update InlinedAtEntry of a DebugLoc.
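///
/// For example (locations are illustrative): if DL is "line 10, inlinedAt L1"
/// and the call currently being inlined carries location L2, the result is
/// "line 10, inlinedAt L1'", where L1' is L1 with its own innermost inlinedAt
/// entry replaced by L2, so the whole chain of inlinings is preserved.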
static DebugLoc updateInlinedAtInfo(const DebugLoc &DL,
                                    const DebugLoc &InlinedAtDL,
                                    LLVMContext &Ctx) {
  if (MDNode *IA = DL.getInlinedAt(Ctx)) {
    DebugLoc NewInlinedAtDL
      = updateInlinedAtInfo(DebugLoc::getFromDILocation(IA), InlinedAtDL, Ctx);
    return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                         NewInlinedAtDL.getAsMDNode(Ctx));
  }

  return DebugLoc::get(DL.getLine(), DL.getCol(), DL.getScope(Ctx),
                       InlinedAtDL.getAsMDNode(Ctx));
}
/// fixupLineNumbers - Update inlined instructions' line numbers to
/// encode the location where these instructions are inlined.
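///
/// Besides rewriting each instruction's DebugLoc, this also re-creates the
/// variable descriptor of any llvm.dbg.value intrinsic with the new
/// inlined-at scope, so debug info for the callee's locals stays attached to
/// this particular inlined copy.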
static void fixupLineNumbers(Function *Fn, Function::iterator FI,
                             Instruction *TheCall) {
  DebugLoc TheCallDL = TheCall->getDebugLoc();
  if (TheCallDL.isUnknown())
    return;

  for (; FI != Fn->end(); ++FI) {
    for (BasicBlock::iterator BI = FI->begin(), BE = FI->end();
         BI != BE; ++BI) {
      DebugLoc DL = BI->getDebugLoc();
      if (!DL.isUnknown()) {
        BI->setDebugLoc(updateInlinedAtInfo(DL, TheCallDL, BI->getContext()));
        if (DbgValueInst *DVI = dyn_cast<DbgValueInst>(BI)) {
          LLVMContext &Ctx = BI->getContext();
          MDNode *InlinedAt = BI->getDebugLoc().getInlinedAt(Ctx);
          DVI->setOperand(2, createInlinedVariable(DVI->getVariable(),
                                                   InlinedAt, Ctx));
        }
      }
    }
  }
}
/// InlineFunction - This function inlines the called function into the basic
/// block of the caller. This returns false if it is not possible to inline
/// this call. The program is still in a well defined state if this occurs
/// though.
///
/// Note that this only does one level of inlining. For example, if the
/// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
/// exists in the instruction stream. Similarly this will inline a recursive
/// function by one level.
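///
/// The major phases below, in order: clone and prune the callee's body into
/// the caller, hoist static allocas from the cloned entry block into the
/// caller's entry block, add lifetime markers for them, wrap the body in
/// stacksave/stackrestore if it contains dynamic allocas, fix up 'tail' and
/// 'nounwind' flags, rewrite calls into invokes when inlining through an
/// invoke, and finally splice the blocks in and wire up the return value(s).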
bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI,
                          bool InsertLifetime) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  // If IFI has any state in it, zap it before we fill it in.
  IFI.reset();

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||           // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;
  // If the call to the callee is not a tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    !(isa<CallInst>(TheCall) && cast<CallInst>(TheCall)->isTailCall());

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasGC()) {
    if (!Caller->hasGC())
      Caller->setGC(CalledFunc->getGC());
    else if (CalledFunc->getGC() != Caller->getGC())
      return false;
  }
  // Get the personality function from the callee if it contains a landing pad.
  Value *CalleePersonality = 0;
  for (Function::const_iterator I = CalledFunc->begin(), E = CalledFunc->end();
       I != E; ++I)
    if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
      const BasicBlock *BB = II->getUnwindDest();
      const LandingPadInst *LP = BB->getLandingPadInst();
      CalleePersonality = LP->getPersonalityFn();
      break;
    }

  // Find the personality function used by the landing pads of the caller. If it
  // exists, then check to see that it matches the personality function used in
  // the callee.
  if (CalleePersonality) {
    for (Function::const_iterator I = Caller->begin(), E = Caller->end();
         I != E; ++I)
      if (const InvokeInst *II = dyn_cast<InvokeInst>(I->getTerminator())) {
        const BasicBlock *BB = II->getUnwindDest();
        const LandingPadInst *LP = BB->getLandingPadInst();

        // If the personality functions match, then we can perform the
        // inlining. Otherwise, we can't inline.
        // TODO: This isn't 100% true. Some personality functions are proper
        //       supersets of others and can be used in place of the other.
        if (LP->getPersonalityFn() != CalleePersonality)
          return false;

        break;
      }
  }
  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  SmallVector<ReturnInst*, 8> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy VMap after cloning.
    ValueToValueMapTy VMap;

    assert(CalledFunc->arg_size() == CS.arg_size() &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit. However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CS.isByValArgument(ArgNo)) {
        ActualArg = HandleByValArgument(ActualArg, TheCall, CalledFunc, IFI,
                                        CalledFunc->getParamAlignment(ArgNo+1));

        // Calls that we inline may use the new alloca, so we need to clear
        // their 'tail' flags if HandleByValArgument introduced a new alloca and
        // the callee has calls.
        MustClearTailCallFlags |= ActualArg != *AI;
      }

      VMap[I] = ActualArg;
    }
    // We want the inliner to prune the code as it copies. We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, VMap,
                              /*ModuleLevelChanges=*/false, Returns, ".i",
                              &InlinedFunctionInfo, IFI.DL, TheCall);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (IFI.CG)
      UpdateCallGraphAfterInlining(CS, FirstNewBlock, VMap, IFI);

    // Update inlined instructions' line number information.
    fixupLineNumbers(Caller, FirstNewBlock, TheCall);
  }
  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller. First
  // calculate which instruction they should be inserted before. We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; ) {
      AllocaInst *AI = dyn_cast<AllocaInst>(I++);
      if (AI == 0) continue;

      // If the alloca is now dead, remove it. This often occurs due to code
      // specialization.
      if (AI->use_empty()) {
        AI->eraseFromParent();
        continue;
      }

      if (!isa<Constant>(AI->getArraySize()))
        continue;

      // Keep track of the static allocas that we inline into the caller.
      IFI.StaticAllocas.push_back(AI);

      // Scan for the block of allocas that we can move over, and move them
      // all at once.
      while (isa<AllocaInst>(I) &&
             isa<Constant>(cast<AllocaInst>(I)->getArraySize())) {
        IFI.StaticAllocas.push_back(cast<AllocaInst>(I));
        ++I;
      }

      // Transfer all of the allocas over in a block. Using splice means
      // that the instructions aren't removed from the symbol table, then
      // reinserted.
      Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                   FirstNewBlock->getInstList(),
                                                   AI, I);
    }
  }
  // Leave lifetime markers for the static alloca's, scoping them to the
  // function we just inlined.
  if (InsertLifetime && !IFI.StaticAllocas.empty()) {
    IRBuilder<> builder(FirstNewBlock->begin());
    for (unsigned ai = 0, ae = IFI.StaticAllocas.size(); ai != ae; ++ai) {
      AllocaInst *AI = IFI.StaticAllocas[ai];

      // If the alloca is already scoped to something smaller than the whole
      // function then there's no need to add redundant, less accurate markers.
      if (hasLifetimeMarkers(AI))
        continue;

      // Try to determine the size of the allocation.
      ConstantInt *AllocaSize = 0;
      if (ConstantInt *AIArraySize =
          dyn_cast<ConstantInt>(AI->getArraySize())) {
        if (IFI.DL) {
          Type *AllocaType = AI->getAllocatedType();
          uint64_t AllocaTypeSize = IFI.DL->getTypeAllocSize(AllocaType);
          uint64_t AllocaArraySize = AIArraySize->getLimitedValue();
          assert(AllocaArraySize > 0 && "array size of AllocaInst is zero");
          // Check that array size doesn't saturate uint64_t and doesn't
          // overflow when it's multiplied by type size.
          if (AllocaArraySize != ~0ULL &&
              UINT64_MAX / AllocaArraySize >= AllocaTypeSize) {
            AllocaSize = ConstantInt::get(Type::getInt64Ty(AI->getContext()),
                                          AllocaArraySize * AllocaTypeSize);
          }
        }
      }

      builder.CreateLifetimeStart(AI, AllocaSize);
      for (unsigned ri = 0, re = Returns.size(); ri != re; ++ri) {
        IRBuilder<> builder(Returns[ri]);
        builder.CreateLifetimeEnd(AI, AllocaSize);
      }
    }
  }
  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    // Get the two intrinsics we care about.
    Function *StackSave = Intrinsic::getDeclaration(M, Intrinsic::stacksave);
    Function *StackRestore=Intrinsic::getDeclaration(M,Intrinsic::stackrestore);

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = IRBuilder<>(FirstNewBlock, FirstNewBlock->begin())
      .CreateCall(StackSave, "savedstack");

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      IRBuilder<>(Returns[i]).CreateCall(StackRestore, SavedPtr);
    }
  }
  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code. Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }
  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
      BranchInst *NewBr = BranchInst::Create(II->getNormalDest(), TheCall);
      NewBr->setDebugLoc(Returns[0]->getDebugLoc());
    }

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (TheCall == R->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->eraseFromParent();

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->eraseFromParent();

    // We are now done with the inlining.
    return true;
  }
  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks. How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  BranchInst *CreatedBranchToNormalDest = NULL;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    CreatedBranchToNormalDest = BranchInst::Create(II->getNormalDest(), TheCall);

    // Split the basic block. This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(CreatedBranchToNormalDest,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);
  // Now that the function is correct, make it a little bit nicer. In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  Type *RTy = CalledFunc->getReturnType();

  PHINode *PHI = 0;
  if (Returns.size() > 1) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    if (!TheCall->use_empty()) {
      PHI = PHINode::Create(RTy, Returns.size(), TheCall->getName(),
                            AfterCallBB->begin());
      // Anything that used the result of the function call should now use the
      // PHI node as their operand.
      TheCall->replaceAllUsesWith(PHI);
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (PHI) {
      for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
        ReturnInst *RI = Returns[i];
        assert(RI->getReturnValue()->getType() == PHI->getType() &&
               "Ret value not consistent in function!");
        PHI->addIncoming(RI->getReturnValue(), RI->getParent());
      }
    }
    // Add a branch to the merge points and remove return instructions.
    DebugLoc Loc;
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      BranchInst* BI = BranchInst::Create(AfterCallBB, RI);
      Loc = RI->getDebugLoc();
      BI->setDebugLoc(Loc);
      RI->eraseFromParent();
    }
    // We need to set the debug location to *somewhere* inside the
    // inlined function. The line number may be nonsensical, but the
    // instruction will at least be associated with the right
    // scope.
    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Loc);
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty()) {
      if (TheCall == Returns[0]->getReturnValue())
        TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
      else
        TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());
    }

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    if (CreatedBranchToNormalDest)
      CreatedBranchToNormalDest->setDebugLoc(Returns[0]->getDebugLoc());

    // Delete the return instruction now; ReturnBB is now empty, so remove it.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call. Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }
  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  // If we inserted a phi node, check to see if it has a single value (e.g. all
  // the entries are the same or undef). If so, remove the PHI so it doesn't
  // block other optimizations.
  if (PHI) {
    if (Value *V = SimplifyInstruction(PHI, IFI.DL)) {
      PHI->replaceAllUsesWith(V);
      PHI->eraseFromParent();
    }
  }

  return true;
}