//===- InlineFunction.cpp - Code to perform function inlining ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inlining of a function into a call site, resolving
// parameters and the return value as appropriate.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Constants.h"
#include "llvm/DerivedTypes.h"
#include "llvm/Module.h"
#include "llvm/Instructions.h"
#include "llvm/Intrinsics.h"
#include "llvm/ParameterAttributes.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Target/TargetData.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Support/CallSite.h"
using namespace llvm;

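// These two overloads are thin forwarding wrappers; the real work is done by
// the generic CallSite version of InlineFunction below.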
bool llvm::InlineFunction(CallInst *CI, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(CI), CG, TD);
}
bool llvm::InlineFunction(InvokeInst *II, CallGraph *CG, const TargetData *TD) {
  return InlineFunction(CallSite(II), CG, TD);
}

/// HandleInlinedInvoke - If we inlined an invoke site, we need to convert calls
/// in the body of the inlined function into invokes and turn unwind
/// instructions into branches to the invoke unwind dest.
///
/// II is the invoke instruction being inlined.  FirstNewBlock is the first
/// block of the inlined code (the last block is the end of the function),
/// and InlinedCodeInfo is information about the code that got inlined.
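///
/// For illustration only (a sketch, not IR from a real test case): a cloned
///   %x = call i32 @foo(i32 %a)
/// becomes
///   %x = invoke i32 @foo(i32 %a) to label %x.noexc unwind label %lpad
/// where %lpad is the unwind destination of the invoke being inlined, and an
/// inlined 'unwind' terminator becomes 'br label %lpad'.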
static void HandleInlinedInvoke(InvokeInst *II, BasicBlock *FirstNewBlock,
                                ClonedCodeInfo &InlinedCodeInfo) {
  BasicBlock *InvokeDest = II->getUnwindDest();
  std::vector<Value*> InvokeDestPHIValues;

  // If there are PHI nodes in the unwind destination block, we need to
  // keep track of which values came into them from this invoke, then remove
  // the entry for this block.
  BasicBlock *InvokeBlock = II->getParent();
  for (BasicBlock::iterator I = InvokeDest->begin(); isa<PHINode>(I); ++I) {
    PHINode *PN = cast<PHINode>(I);
    // Save the value to use for this edge.
    InvokeDestPHIValues.push_back(PN->getIncomingValueForBlock(InvokeBlock));
  }

  Function *Caller = FirstNewBlock->getParent();

  // The inlined code is currently at the end of the function, scan from the
  // start of the inlined code to its end, checking for stuff we need to
  // rewrite.
  if (InlinedCodeInfo.ContainsCalls || InlinedCodeInfo.ContainsUnwinds) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      if (InlinedCodeInfo.ContainsCalls) {
        for (BasicBlock::iterator BBI = BB->begin(), E = BB->end(); BBI != E; ){
          Instruction *I = BBI++;

          // We only need to check for function calls: inlined invoke
          // instructions require no special handling.
          if (!isa<CallInst>(I)) continue;
          CallInst *CI = cast<CallInst>(I);

          // If this call cannot unwind, don't convert it to an invoke.
          if (CI->doesNotThrow())
            continue;

          // Convert this function call into an invoke instruction.
          // First, split the basic block.
          BasicBlock *Split = BB->splitBasicBlock(CI, CI->getName()+".noexc");

          // Next, create the new invoke instruction, inserting it at the end
          // of the old basic block.
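          // Note: in this IR the called value is operand 0 of the CallInst,
          // so the actual arguments are the operand range [op_begin()+1,
          // op_end()).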
          SmallVector<Value*, 8> InvokeArgs(CI->op_begin()+1, CI->op_end());
          InvokeInst *II =
            new InvokeInst(CI->getCalledValue(), Split, InvokeDest,
                           InvokeArgs.begin(), InvokeArgs.end(),
                           CI->getName(), BB->getTerminator());
          II->setCallingConv(CI->getCallingConv());
          II->setParamAttrs(CI->getParamAttrs());

          // Make sure that anything using the call now uses the invoke!
          CI->replaceAllUsesWith(II);

          // Delete the unconditional branch inserted by splitBasicBlock
          BB->getInstList().pop_back();
          Split->getInstList().pop_front();  // Delete the original call

          // Update any PHI nodes in the exceptional block to indicate that
          // there is now a new entry in them.
          unsigned i = 0;
          for (BasicBlock::iterator I = InvokeDest->begin();
               isa<PHINode>(I); ++I, ++i) {
            PHINode *PN = cast<PHINode>(I);
            PN->addIncoming(InvokeDestPHIValues[i], BB);
          }

          // This basic block is now complete, start scanning the next one.
          break;
        }
      }

      if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
        // An UnwindInst requires special handling when it gets inlined into an
        // invoke site.  Once this happens, we know that the unwind would cause
        // a control transfer to the invoke exception destination, so we can
        // transform it into a direct branch to the exception destination.
        new BranchInst(InvokeDest, UI);

        // Delete the unwind instruction!
        UI->getParent()->getInstList().pop_back();

        // Update any PHI nodes in the exceptional block to indicate that
        // there is now a new entry in them.
        unsigned i = 0;
        for (BasicBlock::iterator I = InvokeDest->begin();
             isa<PHINode>(I); ++I, ++i) {
          PHINode *PN = cast<PHINode>(I);
          PN->addIncoming(InvokeDestPHIValues[i], BB);
        }
      }
    }
  }

  // Now that everything is happy, we have one final detail.  The PHI nodes in
  // the exception destination block still have entries due to the original
  // invoke instruction.  Eliminate these entries (which might even delete the
  // PHI node) now.
  InvokeDest->removePredecessor(II->getParent());
}

/// UpdateCallGraphAfterInlining - Once we have cloned code over from a callee
/// into the caller, update the specified callgraph to reflect the changes we
/// made.  Note that it's possible that not all code was copied over, so only
/// some edges of the callgraph will remain.
static void UpdateCallGraphAfterInlining(const Function *Caller,
                                         const Function *Callee,
                                         Function::iterator FirstNewBlock,
                                         DenseMap<const Value*, Value*> &ValueMap,
                                         CallGraph &CG) {
  // Update the call graph by deleting the edge from Callee to Caller.
  CallGraphNode *CalleeNode = CG[Callee];
  CallGraphNode *CallerNode = CG[Caller];
  CallerNode->removeCallEdgeTo(CalleeNode);

  // Since we inlined some uninlined call sites in the callee into the caller,
  // add edges from the caller to all of the callees of the callee.
  for (CallGraphNode::iterator I = CalleeNode->begin(),
       E = CalleeNode->end(); I != E; ++I) {
    const Instruction *OrigCall = I->first.getInstruction();

    DenseMap<const Value*, Value*>::iterator VMI = ValueMap.find(OrigCall);
    // Only copy the edge if the call was inlined!
    if (VMI != ValueMap.end() && VMI->second) {
      // If the call was inlined, but then constant folded, there is no edge to
      // add.  Check for this case.
      if (Instruction *NewCall = dyn_cast<Instruction>(VMI->second))
        CallerNode->addCalledFunction(CallSite::get(NewCall), I->second);
    }
  }
}

// InlineFunction - This function inlines the called function into the basic
// block of the caller.  This returns false if it is not possible to inline this
// call.  The program is still in a well defined state if this occurs though.
//
// Note that this only does one level of inlining.  For example, if the
// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now
// exists in the instruction stream.  Similarly this will inline a recursive
// function by one level.
//
bool llvm::InlineFunction(CallSite CS, CallGraph *CG, const TargetData *TD) {
  Instruction *TheCall = CS.getInstruction();
  assert(TheCall->getParent() && TheCall->getParent()->getParent() &&
         "Instruction not in function!");

  const Function *CalledFunc = CS.getCalledFunction();
  if (CalledFunc == 0 ||          // Can't inline external function or indirect
      CalledFunc->isDeclaration() || // call, or call to a vararg function!
      CalledFunc->getFunctionType()->isVarArg()) return false;

  // If the call to the callee is a non-tail call, we must clear the 'tail'
  // flags on any calls that we inline.
  bool MustClearTailCallFlags =
    isa<CallInst>(TheCall) && !cast<CallInst>(TheCall)->isTailCall();

  // If the call to the callee cannot throw, set the 'nounwind' flag on any
  // calls that we inline.
  bool MarkNoUnwind = CS.doesNotThrow();

  BasicBlock *OrigBB = TheCall->getParent();
  Function *Caller = OrigBB->getParent();
  BasicBlock *UnwindBB = OrigBB->getUnwindDest();

  // GC poses two hazards to inlining, which only occur when the callee has GC:
  //  1. If the caller has no GC, then the callee's GC must be propagated to the
  //     caller.
  //  2. If the caller has a differing GC, it is invalid to inline.
  if (CalledFunc->hasCollector()) {
    if (!Caller->hasCollector())
      Caller->setCollector(CalledFunc->getCollector());
    else if (CalledFunc->getCollector() != Caller->getCollector())
      return false;
  }

  // Get an iterator to the last basic block in the function, which will have
  // the new function inlined after it.
  //
  Function::iterator LastBlock = &Caller->back();

  // Make sure to capture all of the return instructions from the cloned
  // function.
  std::vector<ReturnInst*> Returns;
  ClonedCodeInfo InlinedFunctionInfo;
  Function::iterator FirstNewBlock;

  { // Scope to destroy ValueMap after cloning.
    DenseMap<const Value*, Value*> ValueMap;

    assert(std::distance(CalledFunc->arg_begin(), CalledFunc->arg_end()) ==
           std::distance(CS.arg_begin(), CS.arg_end()) &&
           "No varargs calls can be inlined!");

    // Calculate the vector of arguments to pass into the function cloner, which
    // matches up the formal to the actual argument values.
    CallSite::arg_iterator AI = CS.arg_begin();
    unsigned ArgNo = 0;
    for (Function::const_arg_iterator I = CalledFunc->arg_begin(),
         E = CalledFunc->arg_end(); I != E; ++I, ++AI, ++ArgNo) {
      Value *ActualArg = *AI;

      // When byval arguments are actually inlined, we need to make the copy
      // implied by them explicit.  However, we don't do this if the callee is
      // readonly or readnone, because the copy would be unneeded: the callee
      // doesn't modify the struct.
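      //
      // For illustration only (a sketch, not IR from a real test case):
      // inlining f(%struct.S* byval %p) materializes an
      //   %p1 = alloca %struct.S
      // in the caller's entry block, memcpy's the pointee of %p into it, and
      // remaps the formal argument to %p1 so the inlined body sees a private
      // copy.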
      if (CalledFunc->paramHasAttr(ArgNo+1, ParamAttr::ByVal) &&
          !CalledFunc->onlyReadsMemory()) {
        const Type *AggTy = cast<PointerType>(I->getType())->getElementType();
        const Type *VoidPtrTy = PointerType::getUnqual(Type::Int8Ty);

        // Create the alloca.  If we have TargetData, use nice alignment.
        unsigned Align = 1;
        if (TD) Align = TD->getPrefTypeAlignment(AggTy);
        Value *NewAlloca = new AllocaInst(AggTy, 0, Align, I->getName(),
                                          Caller->begin()->begin());
        // Emit a memcpy.
        Function *MemCpyFn = Intrinsic::getDeclaration(Caller->getParent(),
                                                       Intrinsic::memcpy_i64);
        Value *DestCast = new BitCastInst(NewAlloca, VoidPtrTy, "tmp", TheCall);
        Value *SrcCast = new BitCastInst(*AI, VoidPtrTy, "tmp", TheCall);

        Value *Size;
        if (TD == 0)
          Size = ConstantExpr::getSizeOf(AggTy);
        else
          Size = ConstantInt::get(Type::Int64Ty, TD->getTypeStoreSize(AggTy));

        // Always generate a memcpy of alignment 1 here because we don't know
        // the alignment of the src pointer.  Other optimizations can infer
        // better alignment.
        Value *CallArgs[] = {
          DestCast, SrcCast, Size, ConstantInt::get(Type::Int32Ty, 1)
        };
        CallInst *TheMemCpy =
          new CallInst(MemCpyFn, CallArgs, CallArgs+4, "", TheCall);

        // If we have a call graph, update it.
        if (CG) {
          CallGraphNode *MemCpyCGN = CG->getOrInsertFunction(MemCpyFn);
          CallGraphNode *CallerNode = (*CG)[Caller];
          CallerNode->addCalledFunction(TheMemCpy, MemCpyCGN);
        }

        // Uses of the argument in the function should use our new alloca
        // instead.
        ActualArg = NewAlloca;
      }

      ValueMap[I] = ActualArg;
    }

    // We want the inliner to prune the code as it copies.  We would LOVE to
    // have no dead or constant instructions leftover after inlining occurs
    // (which can happen, e.g., because an argument was constant), but we'll be
    // happy with whatever the cloner can do.
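    // CloneAndPruneFunctionInto appends the cloned blocks to the end of
    // Caller, fills in ValueMap with the callee-to-caller value mapping, and
    // collects the cloned return instructions in Returns.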
    CloneAndPruneFunctionInto(Caller, CalledFunc, ValueMap, Returns, ".i",
                              &InlinedFunctionInfo, TD);

    // Remember the first block that is newly cloned over.
    FirstNewBlock = LastBlock; ++FirstNewBlock;

    // Update the callgraph if requested.
    if (CG)
      UpdateCallGraphAfterInlining(Caller, CalledFunc, FirstNewBlock, ValueMap,
                                   *CG);
  }

  // If there are any alloca instructions in the block that used to be the entry
  // block for the callee, move them to the entry block of the caller.  First
  // calculate which instruction they should be inserted before.  We insert the
  // instructions at the end of the current alloca list.
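  // Only constant-size allocas in the entry block are treated as static stack
  // slots by later passes (e.g. mem2reg), which is why the move below is
  // worth doing.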
  {
    BasicBlock::iterator InsertPoint = Caller->begin()->begin();
    for (BasicBlock::iterator I = FirstNewBlock->begin(),
         E = FirstNewBlock->end(); I != E; )
      if (AllocaInst *AI = dyn_cast<AllocaInst>(I++)) {
        // If the alloca is now dead, remove it.  This often occurs due to code
        // specialization.
        if (AI->use_empty()) {
          AI->eraseFromParent();
          continue;
        }

        if (isa<Constant>(AI->getArraySize())) {
          // Scan for the block of allocas that we can move over, and move them
          // all at once.
          while (isa<AllocaInst>(I) &&
                 isa<Constant>(cast<AllocaInst>(I)->getArraySize()))
            ++I;

          // Transfer all of the allocas over in a block.  Using splice means
          // that the instructions aren't removed from the symbol table, then
          // reinserted.
          Caller->getEntryBlock().getInstList().splice(
              InsertPoint, FirstNewBlock->getInstList(), AI, I);
        }
      }
  }

  // If the inlined code contained dynamic alloca instructions, wrap the inlined
  // code with llvm.stacksave/llvm.stackrestore intrinsics.
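  // The save/restore pair keeps inlined dynamic allocas from accumulating
  // stack space in the caller: without it, reaching the inlined body
  // repeatedly (e.g. from a loop) could grow the stack without bound.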
  if (InlinedFunctionInfo.ContainsDynamicAllocas) {
    Module *M = Caller->getParent();
    const Type *BytePtr = PointerType::getUnqual(Type::Int8Ty);
    // Get the two intrinsics we care about.
    Constant *StackSave, *StackRestore;
    StackSave = M->getOrInsertFunction("llvm.stacksave", BytePtr, NULL);
    StackRestore = M->getOrInsertFunction("llvm.stackrestore", Type::VoidTy,
                                          BytePtr, NULL);

    // If we are preserving the callgraph, add edges to the stacksave/restore
    // functions for the calls we insert.
    CallGraphNode *StackSaveCGN = 0, *StackRestoreCGN = 0, *CallerNode = 0;
    if (CG) {
      // We know that StackSave/StackRestore are Function*'s, because they are
      // intrinsics which must have the right types.
      StackSaveCGN = CG->getOrInsertFunction(cast<Function>(StackSave));
      StackRestoreCGN = CG->getOrInsertFunction(cast<Function>(StackRestore));
      CallerNode = (*CG)[Caller];
    }

    // Insert the llvm.stacksave.
    CallInst *SavedPtr = new CallInst(StackSave, "savedstack",
                                      FirstNewBlock->begin());
    if (CG) CallerNode->addCalledFunction(SavedPtr, StackSaveCGN);

    // Insert a call to llvm.stackrestore before any return instructions in the
    // inlined function.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      CallInst *CI = new CallInst(StackRestore, SavedPtr, "", Returns[i]);
      if (CG) CallerNode->addCalledFunction(CI, StackRestoreCGN);
    }

    // Count the number of StackRestore calls we insert.
    unsigned NumStackRestores = Returns.size();

    // If we are inlining an invoke instruction, insert restores before each
    // unwind.  These unwinds will be rewritten into branches later.
    if (InlinedFunctionInfo.ContainsUnwinds && isa<InvokeInst>(TheCall)) {
      for (Function::iterator BB = FirstNewBlock, E = Caller->end();
           BB != E; ++BB)
        if (UnwindInst *UI = dyn_cast<UnwindInst>(BB->getTerminator())) {
          new CallInst(StackRestore, SavedPtr, "", UI);
          ++NumStackRestores;
        }
    }
  }

  // If we are inlining a tail call instruction through a call site that isn't
  // marked 'tail', we must remove the tail marker for any calls in the inlined
  // code.  Also, calls inlined through a 'nounwind' call site should be marked
  // 'nounwind'.
  if (InlinedFunctionInfo.ContainsCalls &&
      (MustClearTailCallFlags || MarkNoUnwind)) {
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB)
      for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I)
        if (CallInst *CI = dyn_cast<CallInst>(I)) {
          if (MustClearTailCallFlags)
            CI->setTailCall(false);
          if (MarkNoUnwind)
            CI->setDoesNotThrow();
        }
  }

  // If we are inlining through a 'nounwind' call site then any inlined 'unwind'
  // instructions are unreachable.
  if (InlinedFunctionInfo.ContainsUnwinds && MarkNoUnwind)
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new UnreachableInst(Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining a function that unwinds into a BB with an unwind dest,
  // turn the inlined unwinds into branches to the unwind dest.
  if (InlinedFunctionInfo.ContainsUnwinds && UnwindBB && isa<CallInst>(TheCall))
    for (Function::iterator BB = FirstNewBlock, E = Caller->end();
         BB != E; ++BB) {
      TerminatorInst *Term = BB->getTerminator();
      if (isa<UnwindInst>(Term)) {
        new BranchInst(UnwindBB, Term);
        BB->getInstList().erase(Term);
      }
    }

  // If we are inlining for an invoke instruction, we must make sure to rewrite
  // any inlined 'unwind' instructions into branches to the invoke exception
  // destination, and call instructions into invoke instructions.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
    HandleInlinedInvoke(II, FirstNewBlock, InlinedFunctionInfo);

  // If we cloned in _exactly one_ basic block, and if that block ends in a
  // return instruction, we splice the body of the inlined callee directly into
  // the calling basic block.
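  // This fast path avoids splitting OrigBB and building the PHI-based merge
  // block that the general case below requires.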
  if (Returns.size() == 1 && std::distance(FirstNewBlock, Caller->end()) == 1) {
    // Move all of the instructions right before the call.
    OrigBB->getInstList().splice(TheCall, FirstNewBlock->getInstList(),
                                 FirstNewBlock->begin(), FirstNewBlock->end());
    // Remove the cloned basic block.
    Caller->getBasicBlockList().pop_back();

    // If the call site was an invoke instruction, add a branch to the normal
    // destination.
    if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall))
      new BranchInst(II->getNormalDest(), TheCall);

    // If the return instruction returned a value, replace uses of the call with
    // uses of the returned value.
    if (!TheCall->use_empty()) {
      ReturnInst *R = Returns[0];
      if (R->getNumOperands() > 1) {
        // Multiple return values.
        while (!TheCall->use_empty()) {
          GetResultInst *GR = cast<GetResultInst>(TheCall->use_back());
          Value *RV = R->getOperand(GR->getIndex());
          GR->replaceAllUsesWith(RV);
          GR->eraseFromParent();
        }
      } else
        TheCall->replaceAllUsesWith(R->getReturnValue());
    }
    // Since we are now done with the Call/Invoke, we can delete it.
    TheCall->getParent()->getInstList().erase(TheCall);

    // Since we are now done with the return instruction, delete it also.
    Returns[0]->getParent()->getInstList().erase(Returns[0]);

    // We are now done with the inlining.
    return true;
  }

  // Otherwise, we have the normal case, of more than one block to inline or
  // multiple return sites.
  //
  // We want to clone the entire callee function into the hole between the
  // "starter" and "ender" blocks.  How we accomplish this depends on whether
  // this is an invoke instruction or a call instruction.
  BasicBlock *AfterCallBB;
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {

    // Add an unconditional branch to make this look like the CallInst case...
    BranchInst *NewBr = new BranchInst(II->getNormalDest(), TheCall);

    // Split the basic block.  This guarantees that no PHI nodes will have to be
    // updated due to new incoming edges, and makes the invoke case more
    // symmetric to the call case.
    AfterCallBB = OrigBB->splitBasicBlock(NewBr,
                                          CalledFunc->getName()+".exit");

  } else {  // It's a call
    // If this is a call instruction, we need to split the basic block that
    // the call lives in.
    //
    AfterCallBB = OrigBB->splitBasicBlock(TheCall,
                                          CalledFunc->getName()+".exit");
  }

  // Change the branch that used to go to AfterCallBB to branch to the first
  // basic block of the inlined function.
  //
  TerminatorInst *Br = OrigBB->getTerminator();
  assert(Br && Br->getOpcode() == Instruction::Br &&
         "splitBasicBlock broken!");
  Br->setOperand(0, FirstNewBlock);
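  // (For an unconditional BranchInst, operand 0 is its successor block, so
  // this redirects the fallthrough edge into the inlined code.)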

  // Now that the function is correct, make it a little bit nicer.  In
  // particular, move the basic blocks inserted from the end of the function
  // into the space made by splitting the source basic block.
  Caller->getBasicBlockList().splice(AfterCallBB, Caller->getBasicBlockList(),
                                     FirstNewBlock, Caller->end());

  // Handle all of the return instructions that we just cloned in, and eliminate
  // any users of the original call/invoke instruction.
  const Type *RTy = CalledFunc->getReturnType();
  const StructType *STy = dyn_cast<StructType>(RTy);
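  // A struct return type here means the callee used the multiple-return-value
  // mechanism of this IR: each use of the call is a GetResultInst that
  // extracts one element, so each element needs its own PHI below.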
  if (Returns.size() > 1 || STy) {
    // The PHI node should go at the front of the new basic block to merge all
    // possible incoming values.
    SmallVector<PHINode *, 4> PHIs;
    if (!TheCall->use_empty()) {
      if (STy) {
        unsigned NumRetVals = STy->getNumElements();
        // Create new phi nodes such that the phi node number in the PHIs
        // vector matches the corresponding return value operand number.
        Instruction *InsertPt = AfterCallBB->begin();
        for (unsigned i = 0; i < NumRetVals; ++i) {
          PHINode *PHI = new PHINode(STy->getElementType(i),
                                     TheCall->getName() + "." + utostr(i),
                                     InsertPt);
          PHIs.push_back(PHI);
        }
        // TheCall results are used by GetResult instructions.
        while (!TheCall->use_empty()) {
          GetResultInst *GR = cast<GetResultInst>(TheCall->use_back());
          GR->replaceAllUsesWith(PHIs[GR->getIndex()]);
          GR->eraseFromParent();
        }
      } else {
        PHINode *PHI = new PHINode(RTy, TheCall->getName(),
                                   AfterCallBB->begin());
        PHIs.push_back(PHI);
        // Anything that used the result of the function call should now use the
        // PHI node as their operand.
        TheCall->replaceAllUsesWith(PHI);
      }
    }

    // Loop over all of the return instructions adding entries to the PHI node
    // as appropriate.
    if (!PHIs.empty()) {
      // There is at least one return value.
      unsigned NumRetVals = 1;
      if (STy)
        NumRetVals = STy->getNumElements();
      for (unsigned j = 0; j < NumRetVals; ++j) {
        PHINode *PHI = PHIs[j];
        // Each PHI node will receive one value from each return instruction.
        for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
          ReturnInst *RI = Returns[i];
          assert(RI->getReturnValue(j)->getType() == PHI->getType() &&
                 "Ret value not consistent in function!");
          // The PHI number matches the return value operand number.
          PHI->addIncoming(RI->getReturnValue(j), RI->getParent());
        }
      }
    }

    // Add a branch to the merge point and remove return instructions.
    for (unsigned i = 0, e = Returns.size(); i != e; ++i) {
      ReturnInst *RI = Returns[i];
      new BranchInst(AfterCallBB, RI);
      RI->eraseFromParent();
    }
  } else if (!Returns.empty()) {
    // Otherwise, if there is exactly one return value, just replace anything
    // using the return value of the call with the computed value.
    if (!TheCall->use_empty())
      TheCall->replaceAllUsesWith(Returns[0]->getReturnValue());

    // Splice the code from the return block into the block that it will return
    // to, which contains the code that was after the call.
    BasicBlock *ReturnBB = Returns[0]->getParent();
    AfterCallBB->getInstList().splice(AfterCallBB->begin(),
                                      ReturnBB->getInstList());

    // Update PHI nodes that use the ReturnBB to use the AfterCallBB.
    ReturnBB->replaceAllUsesWith(AfterCallBB);

    // Delete the return instruction and the now-empty ReturnBB.
    Returns[0]->eraseFromParent();
    ReturnBB->eraseFromParent();
  } else if (!TheCall->use_empty()) {
    // No returns, but something is using the return value of the call.  Just
    // nuke the result.
    TheCall->replaceAllUsesWith(UndefValue::get(TheCall->getType()));
  }

  // Since we are now done with the Call/Invoke, we can delete it.
  TheCall->eraseFromParent();

  // We should always be able to fold the entry block of the function into the
  // single predecessor of the block...
  assert(cast<BranchInst>(Br)->isUnconditional() && "splitBasicBlock broken!");
  BasicBlock *CalleeEntry = cast<BranchInst>(Br)->getSuccessor(0);

  // Splice the code entry block into calling block, right before the
  // unconditional branch.
  OrigBB->getInstList().splice(Br, CalleeEntry->getInstList());
  CalleeEntry->replaceAllUsesWith(OrigBB);  // Update PHI nodes

  // Remove the unconditional branch.
  OrigBB->getInstList().erase(Br);

  // Now we can remove the CalleeEntry block, which is now empty.
  Caller->getBasicBlockList().erase(CalleeEntry);

  return true;
}