//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InlineCost.h"
#include "llvm/Support/CallSite.h"
#include "llvm/CallingConv.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/ADT/SmallPtrSet.h"
using namespace llvm;

// CountCodeReductionForConstant - Figure out an approximation for how many
// instructions will be constant folded if the specified value is constant.
unsigned InlineCostAnalyzer::FunctionInfo::
         CountCodeReductionForConstant(Value *V) {
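  // Walk each use of V and estimate how many instructions would be folded
  // away if that use saw a constant instead.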
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E; ++UI)
    if (isa<BranchInst>(*UI))
      Reduction += 40;          // Eliminating a conditional branch is a big win
    else if (SwitchInst *SI = dyn_cast<SwitchInst>(*UI))
      // Eliminating a switch is a big win, proportional to the number of edges
      // deleted.
      Reduction += (SI->getNumSuccessors()-1) * 40;
    else if (isa<IndirectBrInst>(*UI))
      // Eliminating an indirect branch is a big win.
      Reduction += 100;
    else if (CallInst *CI = dyn_cast<CallInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win
      Reduction += CI->getCalledValue() == V ? 500 : 0;
    } else if (InvokeInst *II = dyn_cast<InvokeInst>(*UI)) {
      // Turning an indirect call into a direct call is a BIG win
      Reduction += II->getCalledValue() == V ? 500 : 0;
    } else {
      // Figure out if this instruction will be removed due to simple constant
      // propagation.
      Instruction &Inst = cast<Instruction>(**UI);

      // We can't constant propagate instructions which have effects or
      // read memory.
      //
      // FIXME: It would be nice to capture the fact that a load from a
      // pointer-to-constant-global is actually a *really* good thing to zap.
      // Unfortunately, we don't know the pointer that may get propagated here,
      // so we can't make this decision.
      if (Inst.mayReadFromMemory() || Inst.mayHaveSideEffects() ||
          isa<AllocaInst>(Inst))
        continue;

      bool AllOperandsConstant = true;
      for (unsigned i = 0, e = Inst.getNumOperands(); i != e; ++i)
        if (!isa<Constant>(Inst.getOperand(i)) && Inst.getOperand(i) != V) {
          AllOperandsConstant = false;
          break;
        }

      if (AllOperandsConstant) {
        // We will get to remove this instruction...
        Reduction += 7;

        // And any other instructions that use it which become constants
        // themselves.
        Reduction += CountCodeReductionForConstant(&Inst);
      }
    }

  return Reduction;
}

// CountCodeReductionForAlloca - Figure out an approximation of how much smaller
// the function will be if it is inlined into a context where an argument
// becomes an alloca.
unsigned InlineCostAnalyzer::FunctionInfo::
         CountCodeReductionForAlloca(Value *V) {
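  // Only simple loads, stores, and constant-index GEPs of the pointer are
  // credited here; any other use makes the estimate give up and return 0.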
  if (!isa<PointerType>(V->getType())) return 0;  // Not a pointer
  unsigned Reduction = 0;
  for (Value::use_iterator UI = V->use_begin(), E = V->use_end(); UI != E;++UI){
    Instruction *I = cast<Instruction>(*UI);
    if (isa<LoadInst>(I) || isa<StoreInst>(I))
      Reduction += 10;
    else if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I)) {
      // If the GEP has variable indices, we won't be able to do much with it.
      if (GEP->hasAllConstantIndices())
        Reduction += CountCodeReductionForAlloca(GEP)+15;
    } else {
      // If there is some other strange instruction, we're not going to be able
      // to do much if we inline this.
      return 0;
    }
  }

  return Reduction;
}

/// analyzeBasicBlock - Fill in the current structure with information gleaned
/// from the specified block.
void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB) {
  ++NumBlocks;

  for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
       II != E; ++II) {
    if (isa<PHINode>(II)) continue;           // PHI nodes don't count.

    // Special handling for calls.
    if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
      if (isa<DbgInfoIntrinsic>(II))
        continue;  // Debug intrinsics don't count as size.
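
      // CallSite gives a uniform view of call and invoke instructions, so the
      // code below does not need to distinguish between the two.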
      CallSite CS = CallSite::get(const_cast<Instruction*>(&*II));

      // If this function contains a call to setjmp or _setjmp, never inline
      // it.  This is a hack because we depend on the user marking their local
      // variables as volatile if they are live across a setjmp call, and they
      // probably won't do this in callers.
      if (Function *F = CS.getCalledFunction())
        if (F->isDeclaration() &&
            (F->getName() == "setjmp" || F->getName() == "_setjmp"))
          NeverInline = true;

      // Calls often compile into many machine instructions.  Bump up their
      // cost to reflect this.
      if (!isa<IntrinsicInst>(II))
        NumInsts += InlineConstants::CallPenalty;
    }

    if (const AllocaInst *AI = dyn_cast<AllocaInst>(II)) {
      if (!AI->isStaticAlloca())
        this->usesDynamicAlloca = true;
    }

    if (isa<ExtractElementInst>(II) || isa<VectorType>(II->getType()))
      ++NumVectorInsts;

    // Noop casts, including ptr <-> int, don't count.
    if (const CastInst *CI = dyn_cast<CastInst>(II)) {
      if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
          isa<PtrToIntInst>(CI))
        continue;
    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(II)){
      // If a GEP has all constant indices, it will probably be folded with
      // a load/store.
      if (GEPI->hasAllConstantIndices())
        continue;
    }

    ++NumInsts;
  }

  if (isa<ReturnInst>(BB->getTerminator()))
    ++NumRets;

  // We never want to inline functions that contain an indirectbr.  Inlining
  // such a function would be incorrect because all the blockaddresses (in
  // static global initializers, for example) would still refer to the original
  // function, and the indirect jump would branch from the inlined copy of the
  // function into the original function, which is extremely undefined behavior.
  if (isa<IndirectBrInst>(BB->getTerminator()))
    NeverInline = true;
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void CodeMetrics::analyzeFunction(Function *F) {
  // Look at the size of the callee.
  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
    analyzeBasicBlock(&*BB);
}

/// analyzeFunction - Fill in the current structure with information gleaned
/// from the specified function.
void InlineCostAnalyzer::FunctionInfo::analyzeFunction(Function *F) {
  Metrics.analyzeFunction(F);

  // A function with exactly one return has it removed during the inlining
  // process (see InlineFunction), so don't count it.
  // FIXME: This knowledge should really be encoded outside of FunctionInfo.
  if (Metrics.NumRets==1)
    --Metrics.NumInsts;

  // Check out all of the arguments to the function, figuring out how much
  // code can be eliminated if one of the arguments is a constant.
  for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(); I != E; ++I)
    ArgumentWeights.push_back(ArgInfo(CountCodeReductionForConstant(I),
                                      CountCodeReductionForAlloca(I)));
}

// getInlineCost - The heuristic used to determine if we should inline the
// function call or not.
InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS,
                               SmallPtrSet<const Function *, 16> &NeverInline) {
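  // Identify the call instruction, the function being called, and the
  // function containing the call site.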
  Instruction *TheCall = CS.getInstruction();
  Function *Callee = CS.getCalledFunction();
  Function *Caller = TheCall->getParent()->getParent();

  // Don't inline functions which can be redefined at link-time to mean
  // something else.  Don't inline functions marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttr(Attribute::NoInline) || NeverInline.count(Callee))
    return llvm::InlineCost::getNever();

  // InlineCost - This value measures how good an inline candidate this call
  // site is.  A lower inline cost makes it more likely for the call to be
  // inlined.  This value may go negative.
  int InlineCost = 0;

  // If there is only one call of the function, and it has internal linkage,
  // make it almost guaranteed to be inlined.
  if (Callee->hasLocalLinkage() && Callee->hasOneUse())
    InlineCost += InlineConstants::LastCallToStaticBonus;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (Callee->getCallingConv() == CallingConv::Cold)
    InlineCost += InlineConstants::ColdccPenalty;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn.  As such,
  // there is little point in inlining this.
  if (InvokeInst *II = dyn_cast<InvokeInst>(TheCall)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      InlineCost += InlineConstants::NoreturnPenalty;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(TheCall)))
    InlineCost += InlineConstants::NoreturnPenalty;

  // Get information about the callee...
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
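  // A NumBlocks of zero doubles as the "not yet analyzed" marker, since any
  // function we can analyze has at least one basic block.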
  if (CalleeFI.Metrics.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  // If we should never inline this, return a huge cost.
  if (CalleeFI.Metrics.NeverInline)
    return InlineCost::getNever();

  // FIXME: It would be nice to kill off CalleeFI.NeverInline. Then we
  // could move this up and avoid computing the FunctionInfo for
  // things we are going to just return always inline for. This
  // requires handling setjmp somewhere else, however.
  if (!Callee->isDeclaration() && Callee->hasFnAttr(Attribute::AlwaysInline))
    return InlineCost::getAlways();

  if (CalleeFI.Metrics.usesDynamicAlloca) {
    // Get information about the caller...
    FunctionInfo &CallerFI = CachedFunctionInfo[Caller];

    // If we haven't calculated this information yet, do so now.
    if (CallerFI.Metrics.NumBlocks == 0)
      CallerFI.analyzeFunction(Caller);

    // Don't inline a callee with dynamic alloca into a caller without them.
    // Functions containing dynamic alloca's are inefficient in various ways;
    // don't create more inefficiency.
    if (!CallerFI.Metrics.usesDynamicAlloca)
      return InlineCost::getNever();
  }

  // Add to the inline quality for properties that make the call valuable to
  // inline.  This includes factors that indicate that the result of inlining
  // the function will be optimizable.  Currently this just looks at arguments
  // passed into the function.
  unsigned ArgNo = 0;
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I, ++ArgNo) {
    // Each argument passed in has a cost at both the caller and the callee
    // sides.  This favors functions that take many arguments over functions
    // that take few arguments.
    InlineCost -= 20;

    // If this is a function being passed in, it is very likely that we will be
    // able to turn an indirect function call into a direct function call.
    if (isa<Function>(I))
      InlineCost -= 100;

    // If an alloca is passed in, inlining this function is likely to allow
    // significant future optimization possibilities (like scalar promotion, and
    // scalarization), so encourage the inlining of the function.
    else if (isa<AllocaInst>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].AllocaWeight;

    // If this is a constant being passed into the function, use the argument
    // weights calculated for the callee to determine how much will be folded
    // away with this information.
    } else if (isa<Constant>(I)) {
      if (ArgNo < CalleeFI.ArgumentWeights.size())
        InlineCost -= CalleeFI.ArgumentWeights[ArgNo].ConstantWeight;
    }
  }

  // Now that we have considered all of the factors that make the call site more
  // likely to be inlined, look at factors that make us not want to inline it.

  // Don't inline into something too big, which would make it bigger.
  // "size" here is the number of basic blocks, not instructions.
  InlineCost += Caller->size()/15;

  // Look at the size of the callee.  Each instruction counts as 5.
  InlineCost += CalleeFI.Metrics.NumInsts*5;

  return llvm::InlineCost::get(InlineCost);
}

// getInlineFudgeFactor - Return a > 1.0 factor if the inliner should use a
// higher threshold to determine if the function call should be inlined.
float InlineCostAnalyzer::getInlineFudgeFactor(CallSite CS) {
  Function *Callee = CS.getCalledFunction();

  // Get information about the callee...
  FunctionInfo &CalleeFI = CachedFunctionInfo[Callee];

  // If we haven't calculated this information yet, do so now.
  if (CalleeFI.Metrics.NumBlocks == 0)
    CalleeFI.analyzeFunction(Callee);

  float Factor = 1.0f;

  // Single BB functions are often written to be inlined.
  if (CalleeFI.Metrics.NumBlocks == 1)
    Factor += 0.5f;

  // Be more aggressive if the function contains a good chunk (if it makes up
  // at least 10% of the instructions) of vector instructions.
  if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/2)
    Factor += 2.0f;
  else if (CalleeFI.Metrics.NumVectorInsts > CalleeFI.Metrics.NumInsts/10)