//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/ParameterAttributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(CI), CG, TD);
}

bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(II), CG, TD);
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlineCodeInfo is information about the code that got inlined.
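///
/// For example, a cloned call that may throw becomes an invoke whose unwind
/// edge targets II's unwind destination, and a cloned 'unwind' instruction
/// becomes an unconditional branch to that same destination.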
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  std::vector<Value*> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  if (InlinedCodeInfo.ContainsCalls || InlinedCodeInfo.ContainsUnwinds) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      if (InlinedCodeInfo.ContainsCalls) {
        for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ){
          Instruction *I = BBI++;

          // We only need to check for function calls: inlined invoke
          // instructions require no special handling.
          if (!isa<CallInst>(I)) continue;
          CallInst *CI = cast<CallInst>(I);

          // If this call cannot unwind, don't convert it to an invoke.
          if (CI->doesNotThrow())
            continue;

          // Convert this function call into an invoke instruction.
          // First, split the basic block.
          BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

          // Next, create the new invoke instruction, inserting it at the end
          // of the old basic block.
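          // Note that operand 0 of the CallInst is the callee; the actual
          // argument list starts at op_begin()+1.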
          SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
          InvokeInst *II =
            new InvokeInst(CI->getCalledValue(), Split, InvokeDest,
                           InvokeArgs.begin(), InvokeArgs.end(),
                           CI->getName(), BB->getTerminator());
          II->setCallingConv(CI->getCallingConv());
          II->setParamAttrs(CI->getParamAttrs());

          // Make sure that anything using the call now uses the invoke!
          CI->replaceAllUsesWith(II);

          // Delete the unconditional branch inserted by splitBasicBlock.
          BB->getInstList().pop_back();
          Split->getInstList().pop_front();  // Delete the original call

          // Update any PHI nodes in the exceptional block to indicate that
          // there is now a new entry in them.
          unsigned i = 0;
          for (BasicBlock::iterator I = InvokeDest->begin();
               isa<PHINode>(I); ++I, ++i) {
            PHINode *PN = cast<PHINode>(I);
            PN->addIncoming(InvokeDestPHIValues[i], BB);
          }

          // This basic block is now complete, start scanning the next one.
          break;
        }
      }

      if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
        // An UnwindInst requires special handling when it gets inlined into an
        // invoke site.  Once this happens, we know that the unwind would cause
        // a control transfer to the invoke exception destination, so we can
        // transform it into a direct branch to the exception destination.
        new BranchInst(InvokeDest, UI);

        // Delete the unwind instruction!
        UI->getParent()->getInstList().pop_back();

        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        unsigned i = 0;
        for (BasicBlock::iterator I = InvokeDest->begin();
             isa<PHINode>(I); ++I, ++i) {
          PHINode *PN = cast<PHINode>(I);
          PN->addIncoming(InvokeDestPHIValues[i], BB);
        }
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node!).
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph may remain.
static void UpdateCallGraphAfterInlining(const Function *Caller,
                                         const Function *Callee,
                                         Function::iterator FirstNewBlock,
                                         DenseMap<const Value*, Value*> &ValueMap,
                                         CallGraph &CG) {
  // Update the call graph by deleting the edge from Callee to Caller.
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];
  CallerNode->removeCallEdgeTo(CalleeNode);

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  for (CallGraphNode::iterator I = CalleeNode->begin(),
       E = CalleeNode->end(); I != E; ++I) {
    const Instruction *OrigCall = I->first.getInstruction();

    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI != ValueMap.end() && VMI->second) {
      // If the call was inlined, but then constant folded, there is no edge to
      // add.  Check for this case.
      if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
        CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
    }
  }
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline
// this call.  The program is still in a well defined state if this occurs
// though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is a non-tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    isa<CallInst>(TheCall) && !cast<CallInst>(TheCall)->isTailCall();
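  // (A call marked 'tail' promises not to access the caller's stack; once the
  // call is inlined through a non-tail site, that promise may no longer hold.)
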
  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();
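  // Remember the unwind destination, if any, of the block containing the call;
  // inlined 'unwind' instructions may be turned into branches to it below.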
  BasicBlock *UnwindBB = OrigBB->getUnwindDest();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasCollector()) {
    if (!Caller->hasCollector())
      Caller->setCollector(CalledFunc->getCollector());
    else if (CalledFunc->getCollector() != Caller->getCollector())
      return false;
  }

219 // the new function inlined after it.
221 Function::iterator LastBlock = &Caller->back();
223 // Make sure to capture all of the return instructions from the cloned
225 std::vector<ReturnInst*> Returns;
226 ClonedCodeInfo InlinedFunctionInfo;
227 Function::iterator FirstNewBlock;
  { // Scope to destroy ValueMap after cloning.
    DenseMap<const Value*, Value*> ValueMap;
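
    // The callee was rejected above if it is varargs, so the formal and
    // actual argument lists must have the same length.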
    assert(std::distance(CalledFunc->arg_begin(), CalledFunc->arg_end()) ==
           std::distance(CS.arg_begin(), CS.arg_end()) &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner,
    // which matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
      if (CalledFunc->paramHasAttr(ArgNo+1, ParamAttr::ByVal) &&
          !CalledFunc->onlyReadsMemory()) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy = PointerType::getUnqual(Type::Int8Ty);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (TD) Align = TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align, I->getName(),
                                          Caller->begin()->begin());
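
        // Emit an llvm.memcpy from the actual argument into the new alloca to
        // make the byval copy explicit.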
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy_i64);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::Int64Ty, TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size, ConstantInt::get(Type::Int32Ty, 1)
        };
        CallInst *TheMemCpy =
          new CallInst(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);

        // If we have a call graph, update it.
        if (CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;
      }

      ValueMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
    CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
                              &InlinedFunctionInfo, TD);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (CG)
      UpdateCallGraphAfterInlining(Caller, CalledFunc, FirstNewBlock, ValueMap,
                                   *CG);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; )
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I++)) {
        // If the alloca is now dead, remove it.  This often occurs due to code
        // specialization.
        if (AI->use_empty()) {
          AI->eraseFromParent();
          continue;
        }

        if (isa<Constant>(AI->getArraySize())) {
          // Scan for the block of allocas that we can move over, and move them
          // all at once.
          while (isa<AllocaInst>(I) &&
                 isa<Constant>(cast<AllocaInst>(I)->getArraySize()))
            ++I;

          // Transfer all of the allocas over in a block.  Using splice means
          // that the instructions aren't removed from the symbol table, then
          // reinserted.
          Caller->getEntryBlock().getInstList().splice(InsertPoint,
                                                       FirstNewBlock->getInstList(),
                                                       AI, I);
        }
      }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    const Type *BytePtr = PointerType::getUnqual(Type::Int8Ty);
    // Get the two intrinsics we care about.
    Constant *StackSave, *StackRestore;
    StackSave = M->getOrInsertFunction("llvm.stacksave", BytePtr, NULL);
    StackRestore = M->getOrInsertFunction("llvm.stackrestore", Type::VoidTy,
                                          BytePtr, NULL);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CG) {
      // We know that StackSave/StackRestore are Function*'s, because they are
      // intrinsics which must have the right types.
      StackSaveCGN = CG->getOrInsertFunction(cast<Function>(StackSave));
      StackRestoreCGN = CG->getOrInsertFunction(cast<Function>(StackRestore));
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = new CallInst(StackSave, "savedstack",
                                      FirstNewBlock->begin());
    if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = new CallInst(StackRestore, SavedPtr, "", Returns[i]);
      if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          new CallInst(StackRestore, SavedPtr, "", UI);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
  // instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining a function that unwinds into a BB with an unwind dest,
  // turn the inlined unwinds into branches to the unwind dest.
  if (InlinedFunctionInfo.ContainsUnwinds && UnwindBB && isa<CallInst>(TheCall))
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new BranchInst(UnwindBB, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      new BranchInst(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call
    // with uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (R->getNumOperands() > 1) {
        // Multiple return values.
        while (!TheCall->use_empty()) {
          GetResultInst *GR = cast<GetResultInst>(TheCall->use_back());
          Value *RV = R->getOperand(GR->getIndex());
          GR->replaceAllUsesWith(RV);
          GR->eraseFromParent();
        }
      } else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }

    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->getParent()->getInstList().erase(TheCall);

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->getParent()->getInstList().erase(Returns[0]);

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.

  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
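  // AfterCallBB is the block that receives control once the inlined code
  // completes.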
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = new BranchInst(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and make the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  if (!Returns.empty()) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    SmallVector<PHINode *, 4> PHIs;
    if (!TheCall->use_empty()) {
      const Type *RTy = CalledFunc->getReturnType();
      if (const StructType *STy = dyn_cast<StructType>(RTy)) {
        unsigned NumRetVals = STy->getNumElements();
        // Create new phi nodes such that the phi node number in the PHIs
        // vector matches the corresponding return value operand number.
        for (unsigned i = 0; i < NumRetVals; ++i) {
          PHINode *PHI = new PHINode(STy->getElementType(i),
                                     TheCall->getName(), AfterCallBB->begin());
          PHIs.push_back(PHI);
        }
        // TheCall results are used by GetResult instructions.
        while (!TheCall->use_empty()) {
          GetResultInst *GR = cast<GetResultInst>(TheCall->use_back());
          GR->replaceAllUsesWith(PHIs[GR->getIndex()]);
          GR->eraseFromParent();
        }
      } else {
        PHINode *PHI = new PHINode(RTy, TheCall->getName(), AfterCallBB->begin());
        PHIs.push_back(PHI);

        // Anything that used the result of the function call should now use
        // the PHI node as their operand.
        TheCall->replaceAllUsesWith(PHI);
      }
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (!PHIs.empty()) {
      const Type *RTy = CalledFunc->getReturnType();
      if (const StructType *STy = dyn_cast<StructType>(RTy)) {
        unsigned NumRetVals = STy->getNumElements();
        for (unsigned j = 0; j < NumRetVals; ++j) {
          PHINode *PHI = PHIs[j];
          // Each PHI node will receive one value from each return instruction.
          for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
            ReturnInst *RI = Returns[i];
            PHI->addIncoming(RI->getReturnValue(j /*PHI number matches operand number*/),
                             RI->getParent());
          }
        }
      } else {
        for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
          ReturnInst *RI = Returns[i];
          assert(PHIs.size() == 1 && "Invalid number of PHI nodes");
          assert(RI->getReturnValue() && "Ret should have value!");
          assert(RI->getReturnValue()->getType() == PHIs[0]->getType() &&
                 "Ret value not consistent in function!");
          PHIs[0]->addIncoming(RI->getReturnValue(), RI->getParent());
        }
      }
    }

    // Add a branch to the merge points and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      new BranchInst(AfterCallBB, RI);
      RI->getParent()->getInstList().erase(RI);
    }
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}