//===- InlineCost.cpp - Cost analysis for inliner -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements inline cost analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/InlineCost.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "inline-cost"
STATISTIC(NumCallsAnalyzed, "Number of call sites analyzed");

namespace {
class CallAnalyzer : public InstVisitor<CallAnalyzer, bool> {
  typedef InstVisitor<CallAnalyzer, bool> Base;
  friend class InstVisitor<CallAnalyzer, bool>;
  // DataLayout if available, or null.
  const DataLayout *const DL;

  /// The TargetTransformInfo available for this compilation.
  const TargetTransformInfo &TTI;
  // The called function.
  Function &F;

  int Threshold;
  int Cost;

  bool IsCallerRecursive;
  bool IsRecursiveCall;
  bool ExposesReturnsTwice;
  bool HasDynamicAlloca;
  bool ContainsNoDuplicateCall;
  bool HasReturn;
  bool HasIndirectBr;

  /// Number of bytes allocated statically by the callee.
  uint64_t AllocatedSize;
  unsigned NumInstructions, NumVectorInstructions;
  int FiftyPercentVectorBonus, TenPercentVectorBonus;
  int VectorBonus;
  // While we walk the potentially-inlined instructions, we build up and
  // maintain a mapping of simplified values specific to this callsite. The
  // idea is to propagate any special information we have about arguments to
  // this call through the inlinable section of the function, and account for
  // likely simplifications post-inlining. The most important aspect we track
  // is CFG altering simplifications -- when we prove a basic block dead, that
  // can cause dramatic shifts in the cost of inlining a function.
  DenseMap<Value *, Constant *> SimplifiedValues;
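  //
  // Illustrative example (not from the original source): for a callee
  //   define i32 @f(i32 %x) { %c = icmp eq i32 %x, 0 ... }
  // analyzed at the call site `call i32 @f(i32 7)`, this map records
  // %x -> i32 7, so visitCmpInst folds %c to false and the block walk in
  // analyzeCall can treat the branch taken on %c as resolved.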
  // Keep track of the values which map back (through function arguments) to
  // allocas on the caller stack which could be simplified through SROA.
  DenseMap<Value *, Value *> SROAArgValues;

  // The mapping of caller Alloca values to their accumulated cost savings. If
  // we have to disable SROA for one of the allocas, this tells us how much
  // cost must be added.
  DenseMap<Value *, int> SROAArgCosts;

  // Keep track of values which map to a pointer base and constant offset.
  DenseMap<Value *, std::pair<Value *, APInt> > ConstantOffsetPtrs;
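  //
  // Illustrative example (not from the original source): if the caller
  // passes `getelementptr inbounds i32* %base, i64 4` as an argument, the
  // corresponding formal parameter maps to the pair (%base, 16) with 4-byte
  // i32 elements, i.e. a constant byte offset of 4 * 4 = 16.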
  // Custom simplification helper routines.
  bool isAllocaDerivedArg(Value *V);
  bool lookupSROAArgAndCost(Value *V, Value *&Arg,
                            DenseMap<Value *, int>::iterator &CostIt);
  void disableSROA(DenseMap<Value *, int>::iterator CostIt);
  void disableSROA(Value *V);
  void accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                          int InstructionCost);
  bool isGEPOffsetConstant(GetElementPtrInst &GEP);
  bool accumulateGEPOffset(GEPOperator &GEP, APInt &Offset);
  bool simplifyCallSite(Function *F, CallSite CS);
  ConstantInt *stripAndComputeInBoundsConstantOffsets(Value *&V);
  // Custom analysis routines.
  bool analyzeBlock(BasicBlock *BB);
  // Disable several entry points to the visitor so we don't accidentally use
  // them by declaring but not defining them here.
  void visit(Module *); void visit(Module &);
  void visit(Function *); void visit(Function &);
  void visit(BasicBlock *); void visit(BasicBlock &);

  // Provide base case for our instruction visit.
  bool visitInstruction(Instruction &I);

  // Our visit overrides.
  bool visitAlloca(AllocaInst &I);
  bool visitPHI(PHINode &I);
  bool visitGetElementPtr(GetElementPtrInst &I);
  bool visitBitCast(BitCastInst &I);
  bool visitPtrToInt(PtrToIntInst &I);
  bool visitIntToPtr(IntToPtrInst &I);
  bool visitCastInst(CastInst &I);
  bool visitUnaryInstruction(UnaryInstruction &I);
  bool visitCmpInst(CmpInst &I);
  bool visitSub(BinaryOperator &I);
  bool visitBinaryOperator(BinaryOperator &I);
  bool visitLoad(LoadInst &I);
  bool visitStore(StoreInst &I);
  bool visitExtractValue(ExtractValueInst &I);
  bool visitInsertValue(InsertValueInst &I);
  bool visitCallSite(CallSite CS);
  bool visitReturnInst(ReturnInst &RI);
  bool visitBranchInst(BranchInst &BI);
  bool visitSwitchInst(SwitchInst &SI);
  bool visitIndirectBrInst(IndirectBrInst &IBI);
  bool visitResumeInst(ResumeInst &RI);
  bool visitUnreachableInst(UnreachableInst &I);
public:
  CallAnalyzer(const DataLayout *DL, const TargetTransformInfo &TTI,
               Function &Callee, int Threshold)
      : DL(DL), TTI(TTI), F(Callee), Threshold(Threshold), Cost(0),
        IsCallerRecursive(false), IsRecursiveCall(false),
        ExposesReturnsTwice(false), HasDynamicAlloca(false),
        ContainsNoDuplicateCall(false), HasReturn(false), HasIndirectBr(false),
        AllocatedSize(0), NumInstructions(0), NumVectorInstructions(0),
        FiftyPercentVectorBonus(0), TenPercentVectorBonus(0), VectorBonus(0),
        NumConstantArgs(0), NumConstantOffsetPtrArgs(0), NumAllocaArgs(0),
        NumConstantPtrCmps(0), NumConstantPtrDiffs(0),
        NumInstructionsSimplified(0), SROACostSavings(0),
        SROACostSavingsLost(0) {}
  bool analyzeCall(CallSite CS);

  int getThreshold() { return Threshold; }
  int getCost() { return Cost; }
  // Keep a bunch of stats about the cost savings found so we can print them
  // out when debugging.
  unsigned NumConstantArgs;
  unsigned NumConstantOffsetPtrArgs;
  unsigned NumAllocaArgs;
  unsigned NumConstantPtrCmps;
  unsigned NumConstantPtrDiffs;
  unsigned NumInstructionsSimplified;
  unsigned SROACostSavings;
  unsigned SROACostSavingsLost;

  void dump();
};

} // namespace
/// \brief Test whether the given value is an Alloca-derived function argument.
bool CallAnalyzer::isAllocaDerivedArg(Value *V) {
  return SROAArgValues.count(V);
}
/// \brief Lookup the SROA-candidate argument and cost iterator which V maps to.
/// Returns false if V does not map to a SROA-candidate.
bool CallAnalyzer::lookupSROAArgAndCost(
    Value *V, Value *&Arg, DenseMap<Value *, int>::iterator &CostIt) {
  if (SROAArgValues.empty() || SROAArgCosts.empty())
    return false;

  DenseMap<Value *, Value *>::iterator ArgIt = SROAArgValues.find(V);
  if (ArgIt == SROAArgValues.end())
    return false;

  Arg = ArgIt->second;
  CostIt = SROAArgCosts.find(Arg);
  return CostIt != SROAArgCosts.end();
}
/// \brief Disable SROA for the candidate marked by this cost iterator.
///
/// This marks the candidate as no longer viable for SROA, and adds the cost
/// savings associated with it back into the inline cost measurement.
void CallAnalyzer::disableSROA(DenseMap<Value *, int>::iterator CostIt) {
  // If we're no longer able to perform SROA we need to undo its cost savings
  // and prevent subsequent analysis.
  Cost += CostIt->second;
  SROACostSavings -= CostIt->second;
  SROACostSavingsLost += CostIt->second;
  SROAArgCosts.erase(CostIt);
}
/// \brief If 'V' maps to a SROA candidate, disable SROA for it.
void CallAnalyzer::disableSROA(Value *V) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(V, SROAArg, CostIt))
    disableSROA(CostIt);
}
/// \brief Accumulate the given cost for a particular SROA candidate.
void CallAnalyzer::accumulateSROACost(DenseMap<Value *, int>::iterator CostIt,
                                      int InstructionCost) {
  CostIt->second += InstructionCost;
  SROACostSavings += InstructionCost;
}
/// \brief Check whether a GEP's indices are all constant.
///
/// Respects any simplified values known during the analysis of this callsite.
bool CallAnalyzer::isGEPOffsetConstant(GetElementPtrInst &GEP) {
  for (User::op_iterator I = GEP.idx_begin(), E = GEP.idx_end(); I != E; ++I)
    if (!isa<Constant>(*I) && !SimplifiedValues.lookup(*I))
      return false;
  return true;
}
/// \brief Accumulate a constant GEP offset into an APInt if possible.
///
/// Returns false if unable to compute the offset for any reason. Respects any
/// simplified values known during the analysis of this callsite.
bool CallAnalyzer::accumulateGEPOffset(GEPOperator &GEP, APInt &Offset) {
  if (!DL)
    return false;

  unsigned IntPtrWidth = DL->getPointerSizeInBits();
  assert(IntPtrWidth == Offset.getBitWidth());

  for (gep_type_iterator GTI = gep_type_begin(GEP), GTE = gep_type_end(GEP);
       GTI != GTE; ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GTI.getOperand());
    if (!OpC)
      if (Constant *SimpleOp = SimplifiedValues.lookup(GTI.getOperand()))
        OpC = dyn_cast<ConstantInt>(SimpleOp);
    if (!OpC)
      return false;
    if (OpC->isZero()) continue;

    // Handle a struct index, which adds its field offset to the pointer.
    if (StructType *STy = dyn_cast<StructType>(*GTI)) {
      unsigned ElementIdx = OpC->getZExtValue();
      const StructLayout *SL = DL->getStructLayout(STy);
      Offset += APInt(IntPtrWidth, SL->getElementOffset(ElementIdx));
      continue;
    }

    APInt TypeSize(IntPtrWidth, DL->getTypeAllocSize(GTI.getIndexedType()));
    Offset += OpC->getValue().sextOrTrunc(IntPtrWidth) * TypeSize;
  }
  return true;
}
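// Worked example (illustrative, not from the original source): for a GEP
// into %pair = type { i8, i32 } that indexes field 1 and then element 2 of
// a trailing i32 array, the struct index contributes the StructLayout field
// offset (4 bytes here, after ABI padding) and the array index contributes
// 2 * getTypeAllocSize(i32) = 8 bytes, accumulating a constant Offset of 12.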
bool CallAnalyzer::visitAlloca(AllocaInst &I) {
  // Check whether inlining will turn a dynamic alloca into a static
  // alloca, and handle that case.
  if (I.isArrayAllocation()) {
    if (Constant *Size = SimplifiedValues.lookup(I.getArraySize())) {
      ConstantInt *AllocSize = dyn_cast<ConstantInt>(Size);
      assert(AllocSize && "Allocation size not a constant int?");
      Type *Ty = I.getAllocatedType();
      AllocatedSize += Ty->getPrimitiveSizeInBits() * AllocSize->getZExtValue();
      return Base::visitAlloca(I);
    }
  }

  // Accumulate the allocated size.
  if (I.isStaticAlloca()) {
    Type *Ty = I.getAllocatedType();
    AllocatedSize += (DL ? DL->getTypeAllocSize(Ty) :
                      Ty->getPrimitiveSizeInBits());
  }

  // We will happily inline static alloca instructions.
  if (I.isStaticAlloca())
    return Base::visitAlloca(I);

  // FIXME: This is overly conservative. Dynamic allocas are inefficient for
  // a variety of reasons, and so we would like to not inline them into
  // functions which don't currently have a dynamic alloca. This simply
  // disables inlining altogether in the presence of a dynamic alloca.
  HasDynamicAlloca = true;
  return false;
}
bool CallAnalyzer::visitPHI(PHINode &I) {
  // FIXME: We should potentially be tracking values through phi nodes,
  // especially when they collapse to a single value due to deleted CFG edges.

  // FIXME: We need to propagate SROA *disabling* through phi nodes, even
  // though we don't want to propagate its bonuses. The idea is to disable
  // SROA if it *might* be used in an inappropriate manner.

  // Phi nodes are always zero-cost.
  return true;
}
bool CallAnalyzer::visitGetElementPtr(GetElementPtrInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  bool SROACandidate = lookupSROAArgAndCost(I.getPointerOperand(),
                                            SROAArg, CostIt);

  // Try to fold GEPs of constant-offset call site argument pointers. This
  // requires target data and inbounds GEPs.
  if (DL && I.isInBounds()) {
    // Check if we have a base + offset for the pointer.
    Value *Ptr = I.getPointerOperand();
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Ptr);
    if (BaseAndOffset.first) {
      // Check if the offset of this GEP is constant, and if so accumulate it
      // into Offset.
      if (!accumulateGEPOffset(cast<GEPOperator>(I), BaseAndOffset.second)) {
        // Non-constant GEPs aren't folded, and disable SROA.
        if (SROACandidate)
          disableSROA(CostIt);
        return false;
      }
      // Add the result as a new mapping to Base + Offset.
      ConstantOffsetPtrs[&I] = BaseAndOffset;
      // Also handle SROA candidates here, we already know that the GEP is
      // all-constant indexed.
      if (SROACandidate)
        SROAArgValues[&I] = SROAArg;
      return true;
    }
  }

  if (isGEPOffsetConstant(I)) {
    if (SROACandidate)
      SROAArgValues[&I] = SROAArg;
    // Constant GEPs are modeled as free.
    return true;
  }

  // Variable GEPs will require math and will disable SROA.
  if (SROACandidate)
    disableSROA(CostIt);
  return false;
}
bool CallAnalyzer::visitBitCast(BitCastInst &I) {
  // Propagate constants through bitcasts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getBitCast(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offsets through casts.
  std::pair<Value *, APInt> BaseAndOffset
    = ConstantOffsetPtrs.lookup(I.getOperand(0));
  // Casts don't change the offset, just wrap it up.
  if (BaseAndOffset.first)
    ConstantOffsetPtrs[&I] = BaseAndOffset;

  // Also look for SROA candidates here.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  // Bitcasts are always zero cost.
  return true;
}
bool CallAnalyzer::visitPtrToInt(PtrToIntInst &I) {
  const DataLayout *DL = I.getDataLayout();
  // Propagate constants through ptrtoint.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getPtrToInt(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when converted to a plain integer provided the
  // integer is large enough to represent the pointer.
  unsigned IntegerSize = I.getType()->getScalarSizeInBits();
  if (DL && IntegerSize >= DL->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset
      = ConstantOffsetPtrs.lookup(I.getOperand(0));
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // This is really weird. Technically, ptrtoint will disable SROA. However,
  // unless that ptrtoint is *used* somewhere in the live basic blocks after
  // inlining, it will be nuked, and SROA should proceed. All of the uses which
  // would block SROA would also block SROA if applied directly to a pointer,
  // and so we can just add the integer in here. The only places where SROA is
  // preserved either cannot fire on an integer, or won't in-and-of themselves
  // disable SROA (ext) w/o some later use that we would see and disable.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitIntToPtr(IntToPtrInst &I) {
  const DataLayout *DL = I.getDataLayout();
  // Propagate constants through inttoptr.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getIntToPtr(COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Track base/offset pairs when round-tripped through a pointer without
  // modifications provided the integer is not too large.
  Value *Op = I.getOperand(0);
  unsigned IntegerSize = Op->getType()->getScalarSizeInBits();
  if (DL && IntegerSize <= DL->getPointerSizeInBits()) {
    std::pair<Value *, APInt> BaseAndOffset = ConstantOffsetPtrs.lookup(Op);
    if (BaseAndOffset.first)
      ConstantOffsetPtrs[&I] = BaseAndOffset;
  }

  // "Propagate" SROA here in the same manner as we do for ptrtoint above.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(Op, SROAArg, CostIt))
    SROAArgValues[&I] = SROAArg;

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitCastInst(CastInst &I) {
  // Propagate constants through casts.
  Constant *COp = dyn_cast<Constant>(I.getOperand(0));
  if (!COp)
    COp = SimplifiedValues.lookup(I.getOperand(0));
  if (COp)
    if (Constant *C = ConstantExpr::getCast(I.getOpcode(), COp, I.getType())) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable SROA in the face of arbitrary casts we don't whitelist elsewhere.
  disableSROA(I.getOperand(0));

  return TargetTransformInfo::TCC_Free == TTI.getUserCost(&I);
}
bool CallAnalyzer::visitUnaryInstruction(UnaryInstruction &I) {
  Value *Operand = I.getOperand(0);
  Constant *COp = dyn_cast<Constant>(Operand);
  if (!COp)
    COp = SimplifiedValues.lookup(Operand);
  if (COp)
    if (Constant *C = ConstantFoldInstOperands(I.getOpcode(), I.getType(),
                                               COp, DL)) {
      SimplifiedValues[&I] = C;
      return true;
    }

  // Disable any SROA on the argument to arbitrary unary operators.
  disableSROA(Operand);

  return false;
}
bool CallAnalyzer::visitCmpInst(CmpInst &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  // First try to handle simplified comparisons.
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
    if (Constant *CRHS = dyn_cast<Constant>(RHS))
      if (Constant *C = ConstantExpr::getCompare(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        return true;
      }
  }

  if (I.getOpcode() == Instruction::FCmp)
    return false;

  // Otherwise look for a comparison between constant offset pointers with
  // a common base.
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the icmp to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getICmp(I.getPredicate(), CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrCmps;
        return true;
      }
    }
  }

  // If the comparison is an equality comparison with null, we can simplify it
  // for any alloca-derived argument.
  if (I.isEquality() && isa<ConstantPointerNull>(I.getOperand(1)))
    if (isAllocaDerivedArg(I.getOperand(0))) {
      // We can actually predict the result of comparisons between an
      // alloca-derived value and null. Note that this fires regardless of
      // SROA firing.
      bool IsNotEqual = I.getPredicate() == CmpInst::ICMP_NE;
      SimplifiedValues[&I] = IsNotEqual ? ConstantInt::getTrue(I.getType())
                                        : ConstantInt::getFalse(I.getType());
      return true;
    }

  // Finally check for SROA candidates in comparisons.
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (isa<ConstantPointerNull>(I.getOperand(1))) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
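// Illustrative example (not from the original source): if the caller passes
// the address of a local `%buf = alloca [16 x i8]` and the callee checks
// `icmp eq i8* %p, null`, the compare folds to false here because an
// alloca-derived pointer is never null, which in turn can prove an entire
// error-handling block dead during the CFG walk in analyzeCall.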
bool CallAnalyzer::visitSub(BinaryOperator &I) {
  // Try to handle a special case: we can fold computing the difference of two
  // constant-related pointers.
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  Value *LHSBase, *RHSBase;
  APInt LHSOffset, RHSOffset;
  std::tie(LHSBase, LHSOffset) = ConstantOffsetPtrs.lookup(LHS);
  if (LHSBase) {
    std::tie(RHSBase, RHSOffset) = ConstantOffsetPtrs.lookup(RHS);
    if (RHSBase && LHSBase == RHSBase) {
      // We have common bases, fold the subtract to a constant based on the
      // offsets.
      Constant *CLHS = ConstantInt::get(LHS->getContext(), LHSOffset);
      Constant *CRHS = ConstantInt::get(RHS->getContext(), RHSOffset);
      if (Constant *C = ConstantExpr::getSub(CLHS, CRHS)) {
        SimplifiedValues[&I] = C;
        ++NumConstantPtrDiffs;
        return true;
      }
    }
  }

  // Otherwise, fall back to the generic logic for simplifying and handling
  // instructions.
  return Base::visitSub(I);
}
bool CallAnalyzer::visitBinaryOperator(BinaryOperator &I) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);
  if (!isa<Constant>(LHS))
    if (Constant *SimpleLHS = SimplifiedValues.lookup(LHS))
      LHS = SimpleLHS;
  if (!isa<Constant>(RHS))
    if (Constant *SimpleRHS = SimplifiedValues.lookup(RHS))
      RHS = SimpleRHS;
  Value *SimpleV = SimplifyBinOp(I.getOpcode(), LHS, RHS, DL);
  if (Constant *C = dyn_cast_or_null<Constant>(SimpleV)) {
    SimplifiedValues[&I] = C;
    return true;
  }

  // Disable any SROA on arguments to arbitrary, unsimplified binary operators.
  disableSROA(LHS);
  disableSROA(RHS);

  return false;
}
bool CallAnalyzer::visitLoad(LoadInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
bool CallAnalyzer::visitStore(StoreInst &I) {
  Value *SROAArg;
  DenseMap<Value *, int>::iterator CostIt;
  if (lookupSROAArgAndCost(I.getOperand(0), SROAArg, CostIt)) {
    if (I.isSimple()) {
      accumulateSROACost(CostIt, InlineConstants::InstrCost);
      return true;
    }

    disableSROA(CostIt);
  }

  return false;
}
bool CallAnalyzer::visitExtractValue(ExtractValueInst &I) {
  // Constant folding for extract value is trivial.
  Constant *C = dyn_cast<Constant>(I.getAggregateOperand());
  if (!C)
    C = SimplifiedValues.lookup(I.getAggregateOperand());
  if (C) {
    SimplifiedValues[&I] = ConstantExpr::getExtractValue(C, I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}
bool CallAnalyzer::visitInsertValue(InsertValueInst &I) {
  // Constant folding for insert value is trivial.
  Constant *AggC = dyn_cast<Constant>(I.getAggregateOperand());
  if (!AggC)
    AggC = SimplifiedValues.lookup(I.getAggregateOperand());
  Constant *InsertedC = dyn_cast<Constant>(I.getInsertedValueOperand());
  if (!InsertedC)
    InsertedC = SimplifiedValues.lookup(I.getInsertedValueOperand());
  if (AggC && InsertedC) {
    SimplifiedValues[&I] = ConstantExpr::getInsertValue(AggC, InsertedC,
                                                        I.getIndices());
    return true;
  }

  // SROA can look through these but give them a cost.
  return false;
}
/// \brief Try to simplify a call site.
///
/// Takes a concrete function and callsite and tries to actually simplify it by
/// analyzing the arguments and call itself with instsimplify. Returns true if
/// it has simplified the callsite to some other entity (a constant), making it
/// free.
bool CallAnalyzer::simplifyCallSite(Function *F, CallSite CS) {
  // FIXME: Using the instsimplify logic directly for this is inefficient
  // because we have to continually rebuild the argument list even when no
  // simplifications can be performed. Until that is fixed with remapping
  // inside of instsimplify, directly constant fold calls here.
  if (!canConstantFoldCallTo(F))
    return false;

  // Try to re-map the arguments to constants.
  SmallVector<Constant *, 4> ConstantArgs;
  ConstantArgs.reserve(CS.arg_size());
  for (CallSite::arg_iterator I = CS.arg_begin(), E = CS.arg_end();
       I != E; ++I) {
    Constant *C = dyn_cast<Constant>(*I);
    if (!C)
      C = dyn_cast_or_null<Constant>(SimplifiedValues.lookup(*I));
    if (!C)
      return false; // This argument doesn't map to a constant.

    ConstantArgs.push_back(C);
  }
  if (Constant *C = ConstantFoldCall(F, ConstantArgs)) {
    SimplifiedValues[CS.getInstruction()] = C;
    return true;
  }

  return false;
}
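// Illustrative example (not from the original source): if the callee
// contains `%n = call i32 @llvm.ctpop.i32(i32 %x)` and %x simplified to the
// constant 12 at this call site, ConstantFoldCall folds the call to 2 (the
// population count of 0b1100) and the callsite is modeled as free.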
bool CallAnalyzer::visitCallSite(CallSite CS) {
  if (CS.hasFnAttr(Attribute::ReturnsTwice) &&
      !F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                      Attribute::ReturnsTwice)) {
    // This aborts the entire analysis.
    ExposesReturnsTwice = true;
    return false;
  }
  if (CS.isCall() &&
      cast<CallInst>(CS.getInstruction())->cannotDuplicate())
    ContainsNoDuplicateCall = true;

  if (Function *F = CS.getCalledFunction()) {
    // When we have a concrete function, first try to simplify it directly.
    if (simplifyCallSite(F, CS))
      return true;

    // Next check if it is an intrinsic we know about.
    // FIXME: Lift this into part of the InstVisitor.
    if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(CS.getInstruction())) {
      switch (II->getIntrinsicID()) {
      default:
        return Base::visitCallSite(CS);

      case Intrinsic::memset:
      case Intrinsic::memcpy:
      case Intrinsic::memmove:
        // SROA can usually chew through these intrinsics, but they aren't free.
        return false;
      }
    }

    if (F == CS.getInstruction()->getParent()->getParent()) {
      // This flag will fully abort the analysis, so don't bother with anything
      // else.
      IsRecursiveCall = true;
      return false;
    }

    if (TTI.isLoweredToCall(F)) {
      // We account for the average 1 instruction per call argument setup
      // here.
      Cost += CS.arg_size() * InlineConstants::InstrCost;

      // Everything other than inline ASM will also have a significant cost
      // merely from making the call.
      if (!isa<InlineAsm>(CS.getCalledValue()))
        Cost += InlineConstants::CallPenalty;
    }

    return Base::visitCallSite(CS);
  }

  // Otherwise we're in a very special case -- an indirect function call. See
  // if we can be particularly clever about this.
  Value *Callee = CS.getCalledValue();

  // First, pay the price of the argument setup. We account for the average
  // 1 instruction per call argument setup here.
  Cost += CS.arg_size() * InlineConstants::InstrCost;

  // Next, check if this happens to be an indirect function call to a known
  // function in this inline context. If not, we've done all we can.
  Function *F = dyn_cast_or_null<Function>(SimplifiedValues.lookup(Callee));
  if (!F)
    return Base::visitCallSite(CS);

  // If we have a constant that we are calling as a function, we can peer
  // through it and see the function target. This happens not infrequently
  // during devirtualization and so we want to give it a hefty bonus for
  // inlining, but cap that bonus in the event that inlining wouldn't pan
  // out. Pretend to inline the function, with a custom threshold.
  CallAnalyzer CA(DL, TTI, *F, InlineConstants::IndirectCallThreshold);
  if (CA.analyzeCall(CS)) {
    // We were able to inline the indirect call! Subtract the cost from the
    // bonus we want to apply, but don't go below zero.
    Cost -= std::max(0, InlineConstants::IndirectCallThreshold - CA.getCost());
  }

  return Base::visitCallSite(CS);
}
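// Worked example (illustrative, not from the original source): if the
// indirect callee resolved through SimplifiedValues analyzes to a cost of
// 30 against InlineConstants::IndirectCallThreshold (100 at the time of
// writing), the callsite receives a bonus of max(0, 100 - 30) = 70 off its
// cost, rewarding calls that are likely to devirtualize and inline cheaply.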
bool CallAnalyzer::visitReturnInst(ReturnInst &RI) {
  // At least one return instruction will be free after inlining.
  bool Free = !HasReturn;
  HasReturn = true;
  return Free;
}
bool CallAnalyzer::visitBranchInst(BranchInst &BI) {
  // We model unconditional branches as essentially free -- they really
  // shouldn't exist at all, but handling them makes the behavior of the
  // inliner more regular and predictable. Interestingly, conditional branches
  // which will fold away are also free.
  return BI.isUnconditional() || isa<ConstantInt>(BI.getCondition()) ||
         dyn_cast_or_null<ConstantInt>(
             SimplifiedValues.lookup(BI.getCondition()));
}
bool CallAnalyzer::visitSwitchInst(SwitchInst &SI) {
  // We model unconditional switches as free, see the comments on handling
  // branches.
  if (isa<ConstantInt>(SI.getCondition()))
    return true;
  if (Value *V = SimplifiedValues.lookup(SI.getCondition()))
    if (isa<ConstantInt>(V))
      return true;

  // Otherwise, we need to accumulate a cost proportional to the number of
  // distinct successor blocks. This fan-out in the CFG cannot be represented
  // for free even if we can represent the core switch as a jumptable that
  // takes a single instruction.
  //
  // NB: We convert large switches which are just used to initialize large phi
  // nodes to lookup tables instead in simplify-cfg, so this shouldn't prevent
  // inlining those. It will prevent inlining in cases where the optimization
  // does not (yet) fire.
  SmallPtrSet<BasicBlock *, 8> SuccessorBlocks;
  SuccessorBlocks.insert(SI.getDefaultDest());
  for (auto I = SI.case_begin(), E = SI.case_end(); I != E; ++I)
    SuccessorBlocks.insert(I.getCaseSuccessor());
  // Add cost corresponding to the number of distinct destinations. The first
  // we model as free because of fallthrough.
  Cost += (SuccessorBlocks.size() - 1) * InlineConstants::InstrCost;
  return false;
}
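// Worked example (illustrative, not from the original source): a switch
// with a default block and four cases targeting four distinct blocks has
// five distinct successors, so it adds 4 * InlineConstants::InstrCost to
// Cost; if two of those cases share a block, only four distinct successors
// remain and the charge drops to 3 * InstrCost.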
bool CallAnalyzer::visitIndirectBrInst(IndirectBrInst &IBI) {
  // We never want to inline functions that contain an indirectbr. This is
  // incorrect because all the blockaddress's (in static global initializers
  // for example) would be referring to the original function, and this
  // indirect jump would jump from the inlined copy of the function into the
  // original function which is extremely undefined behavior.
  // FIXME: This logic isn't really right; we can safely inline functions with
  // indirectbr's as long as no other function or global references the
  // blockaddress of a block within the current function. And as a QOI issue,
  // if someone is using a blockaddress without an indirectbr, and that
  // reference somehow ends up in another function or global, we probably
  // don't want to inline this function.
  HasIndirectBr = true;
  return false;
}
bool CallAnalyzer::visitResumeInst(ResumeInst &RI) {
  // FIXME: It's not clear that a single instruction is an accurate model for
  // the inline cost of a resume instruction.
  return false;
}
bool CallAnalyzer::visitUnreachableInst(UnreachableInst &I) {
  // FIXME: It might be reasonable to discount the cost of instructions
  // leading to unreachable as they have the lowest possible impact on both
  // runtime and code size.
  return true; // No actual code is needed for unreachable.
}
bool CallAnalyzer::visitInstruction(Instruction &I) {
  // Some instructions are free. All of the free intrinsics can also be
  // handled by SROA, etc.
  if (TargetTransformInfo::TCC_Free == TTI.getUserCost(&I))
    return true;

  // We found something we don't understand or can't handle. Mark any SROA-able
  // values in the operand list as no longer viable.
  for (User::op_iterator OI = I.op_begin(), OE = I.op_end(); OI != OE; ++OI)
    disableSROA(*OI);

  return false;
}
/// \brief Analyze a basic block for its contribution to the inline cost.
///
/// This method walks the analyzer over every instruction in the given basic
/// block and accounts for their cost during inlining at this callsite. It
/// aborts early if the threshold has been exceeded or an impossible to inline
/// construct has been detected. It returns false if inlining is no longer
/// viable, and true if inlining remains viable.
bool CallAnalyzer::analyzeBlock(BasicBlock *BB) {
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E; ++I) {
    // FIXME: Currently, the number of instructions in a function, regardless
    // of our ability to simplify them during inlining to constants or dead
    // code, is actually used by the vector bonus heuristic. As long as that's
    // true, we have to special case debug intrinsics here to prevent
    // differences in inlining due to debug symbols. Eventually, the number of
    // unsimplified instructions shouldn't factor into the cost computation,
    // but until then, hack around it here.
    if (isa<DbgInfoIntrinsic>(I))
      continue;

    ++NumInstructions;
    if (isa<ExtractElementInst>(I) || I->getType()->isVectorTy())
      ++NumVectorInstructions;

    // If the instruction simplified to a constant, there is no cost to this
    // instruction. Visit the instructions using our InstVisitor to account for
    // all of the per-instruction logic. The visit tree returns true if we
    // consumed the instruction in any way, and false if the instruction's base
    // cost should count against inlining.
    if (Base::visit(I))
      ++NumInstructionsSimplified;
    else
      Cost += InlineConstants::InstrCost;

    // If visiting this instruction detected an uninlinable pattern, abort.
    if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
        HasIndirectBr)
      return false;

    // If the caller is a recursive function then we don't want to inline
    // functions which allocate a lot of stack space because it would increase
    // the caller stack usage dramatically.
    if (IsCallerRecursive &&
        AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
      return false;

    if (NumVectorInstructions > NumInstructions/2)
      VectorBonus = FiftyPercentVectorBonus;
    else if (NumVectorInstructions > NumInstructions/10)
      VectorBonus = TenPercentVectorBonus;
    else
      VectorBonus = 0;

    // Check if we've passed the threshold so we don't spin in huge basic
    // blocks that will never inline.
    if (Cost > (Threshold + VectorBonus))
      return false;
  }

  return true;
}
/// \brief Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant. It
/// returns 0 if V is not a pointer, and returns the constant '0' if there are
/// no constant offsets applied.
ConstantInt *CallAnalyzer::stripAndComputeInBoundsConstantOffsets(Value *&V) {
  if (!DL || !V->getType()->isPointerTy())
    return nullptr;

  unsigned IntPtrWidth = DL->getPointerSizeInBits();
  APInt Offset = APInt::getNullValue(IntPtrWidth);

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<Value *, 4> Visited;
  Visited.insert(V);
  do {
    if (GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
      if (!GEP->isInBounds() || !accumulateGEPOffset(*GEP, Offset))
        return nullptr;
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
      if (GA->mayBeOverridden())
        break;
      V = GA->getAliasee();
    } else {
      break;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V));

  Type *IntPtrTy = DL->getIntPtrType(V->getContext());
  return cast<ConstantInt>(ConstantInt::get(IntPtrTy, Offset));
}
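// Illustrative example (not from the original source): for an argument that
// is an inbounds GEP to element 2 of `[8 x i32]* @g`, bitcast to i8*, this
// walks through both operators, leaves V pointing at @g, and returns the
// constant byte offset 8 (two 4-byte i32 elements).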
/// \brief Analyze a call site for potential inlining.
///
/// Returns true if inlining this call is viable, and false if it is not
/// viable. It computes the cost and adjusts the threshold based on numerous
/// factors and heuristics. If this method returns false but the computed cost
/// is below the computed threshold, then inlining was forcibly disabled by
/// some artifact of the routine.
bool CallAnalyzer::analyzeCall(CallSite CS) {
  ++NumCallsAnalyzed;

  // Track whether the post-inlining function would have more than one basic
  // block. A single basic block is often intended for inlining. Balloon the
  // threshold by 50% until we pass the single-BB phase.
  bool SingleBB = true;
  int SingleBBBonus = Threshold / 2;
  Threshold += SingleBBBonus;

  // Perform some tweaks to the cost and threshold based on the direct
  // callsite information.

  // We want to more aggressively inline vector-dense kernels, so up the
  // threshold, and we'll lower it if the % of vector instructions gets too
  // low.
  assert(NumInstructions == 0);
  assert(NumVectorInstructions == 0);
  FiftyPercentVectorBonus = Threshold;
  TenPercentVectorBonus = Threshold / 2;

  // Give out bonuses per argument, as the instructions setting them up will
  // be gone after inlining.
  for (unsigned I = 0, E = CS.arg_size(); I != E; ++I) {
    if (DL && CS.isByValArgument(I)) {
      // We approximate the number of loads and stores needed by dividing the
      // size of the byval type by the target's pointer size.
      PointerType *PTy = cast<PointerType>(CS.getArgument(I)->getType());
      unsigned TypeSize = DL->getTypeSizeInBits(PTy->getElementType());
      unsigned PointerSize = DL->getPointerSizeInBits();
      // Ceiling division.
      unsigned NumStores = (TypeSize + PointerSize - 1) / PointerSize;

      // If it generates more than 8 stores it is likely to be expanded as an
      // inline memcpy so we take that as an upper bound. Otherwise we assume
      // one load and one store per word copied.
      // FIXME: The maxStoresPerMemcpy setting from the target should be used
      // here instead of a magic number of 8, but it's not available via
      // DataLayout.
      NumStores = std::min(NumStores, 8U);

      Cost -= 2 * NumStores * InlineConstants::InstrCost;
    } else {
      // For non-byval arguments subtract off one instruction per call
      // argument.
      Cost -= InlineConstants::InstrCost;
    }
  }
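  // Worked example (illustrative, not from the original source): a byval
  // argument of a 20-byte struct with 64-bit pointers needs
  // ceil(160 / 64) = 3 word copies, so 2 * 3 * InlineConstants::InstrCost is
  // credited against Cost (one load and one store per word copied).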
  // If there is only one call of the function, and it has internal linkage,
  // the cost of inlining it drops dramatically.
  bool OnlyOneCallAndLocalLinkage = F.hasLocalLinkage() && F.hasOneUse() &&
    &F == CS.getCalledFunction();
  if (OnlyOneCallAndLocalLinkage)
    Cost += InlineConstants::LastCallToStaticBonus;

  // If the instruction after the call, or if the normal destination of the
  // invoke is an unreachable instruction, the function is noreturn. As such,
  // there is little point in inlining this unless there is literally zero
  // cost.
  Instruction *Instr = CS.getInstruction();
  if (InvokeInst *II = dyn_cast<InvokeInst>(Instr)) {
    if (isa<UnreachableInst>(II->getNormalDest()->begin()))
      Threshold = 0;
  } else if (isa<UnreachableInst>(++BasicBlock::iterator(Instr)))
    Threshold = 0;

  // If this function uses the coldcc calling convention, prefer not to inline
  // it.
  if (F.getCallingConv() == CallingConv::Cold)
    Cost += InlineConstants::ColdccPenalty;

  // Check if we're done. This can happen due to bonuses and penalties.
  if (Cost > Threshold)
    return false;

  if (F.empty())
    return true;
  Function *Caller = CS.getInstruction()->getParent()->getParent();
  // Check if the caller function is recursive itself.
  for (User *U : Caller->users()) {
    CallSite Site(U);
    if (!Site)
      continue;
    Instruction *I = Site.getInstruction();
    if (I->getParent()->getParent() == Caller) {
      IsCallerRecursive = true;
      break;
    }
  }
  // Populate our simplified values by mapping from function arguments to call
  // arguments with known important simplifications.
  CallSite::arg_iterator CAI = CS.arg_begin();
  for (Function::arg_iterator FAI = F.arg_begin(), FAE = F.arg_end();
       FAI != FAE; ++FAI, ++CAI) {
    assert(CAI != CS.arg_end());
    if (Constant *C = dyn_cast<Constant>(CAI))
      SimplifiedValues[FAI] = C;

    Value *PtrArg = *CAI;
    if (ConstantInt *C = stripAndComputeInBoundsConstantOffsets(PtrArg)) {
      ConstantOffsetPtrs[FAI] = std::make_pair(PtrArg, C->getValue());

      // We can SROA any pointer arguments derived from alloca instructions.
      if (isa<AllocaInst>(PtrArg)) {
        SROAArgValues[FAI] = PtrArg;
        SROAArgCosts[PtrArg] = 0;
      }
    }
  }
  NumConstantArgs = SimplifiedValues.size();
  NumConstantOffsetPtrArgs = ConstantOffsetPtrs.size();
  NumAllocaArgs = SROAArgValues.size();
  // The worklist of live basic blocks in the callee *after* inlining. We avoid
  // adding basic blocks of the callee which can be proven to be dead for this
  // particular call site in order to get more accurate cost estimates. This
  // requires a somewhat heavyweight iteration pattern: we need to walk the
  // basic blocks in a breadth-first order as we insert live successors. To
  // accomplish this, prioritizing for small iterations because we exit after
  // crossing our threshold, we use a small-size optimized SetVector.
  typedef SetVector<BasicBlock *, SmallVector<BasicBlock *, 16>,
                    SmallPtrSet<BasicBlock *, 16> > BBSetVector;
  BBSetVector BBWorklist;
  BBWorklist.insert(&F.getEntryBlock());
  // Note that we *must not* cache the size, this loop grows the worklist.
  for (unsigned Idx = 0; Idx != BBWorklist.size(); ++Idx) {
    // Bail out the moment we cross the threshold. This means we'll under-count
    // the cost, but only when undercounting doesn't matter.
    if (Cost > (Threshold + VectorBonus))
      break;

    BasicBlock *BB = BBWorklist[Idx];
    if (BB->empty())
      continue;
    // Analyze the cost of this block. If we blow through the threshold, this
    // returns false, and we can bail on out.
    if (!analyzeBlock(BB)) {
      if (IsRecursiveCall || ExposesReturnsTwice || HasDynamicAlloca ||
          HasIndirectBr)
        return false;

      // If the caller is a recursive function then we don't want to inline
      // functions which allocate a lot of stack space because it would
      // increase the caller stack usage dramatically.
      if (IsCallerRecursive &&
          AllocatedSize > InlineConstants::TotalAllocaSizeRecursiveCaller)
        return false;

      break;
    }

    TerminatorInst *TI = BB->getTerminator();
    // Add in the live successors by first checking whether we have a
    // terminator that may be simplified based on the values simplified by
    // this call.
    if (BranchInst *BI = dyn_cast<BranchInst>(TI)) {
      if (BI->isConditional()) {
        Value *Cond = BI->getCondition();
        if (ConstantInt *SimpleCond
              = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
          BBWorklist.insert(BI->getSuccessor(SimpleCond->isZero() ? 1 : 0));
          continue;
        }
      }
    } else if (SwitchInst *SI = dyn_cast<SwitchInst>(TI)) {
      Value *Cond = SI->getCondition();
      if (ConstantInt *SimpleCond
            = dyn_cast_or_null<ConstantInt>(SimplifiedValues.lookup(Cond))) {
        BBWorklist.insert(SI->findCaseValue(SimpleCond).getCaseSuccessor());
        continue;
      }
    }

    // If we're unable to select a particular successor, just count all of
    // them.
    for (unsigned TIdx = 0, TSize = TI->getNumSuccessors(); TIdx != TSize;
         ++TIdx)
      BBWorklist.insert(TI->getSuccessor(TIdx));
    // If we had any successors at this point, then post-inlining is likely to
    // have them as well. Note that we assume any basic blocks which existed
    // due to branches or switches which folded above will also fold after
    // inlining.
    if (SingleBB && TI->getNumSuccessors() > 1) {
      // Take off the bonus we applied to the threshold.
      Threshold -= SingleBBBonus;
      SingleBB = false;
    }
  }
  // If this is a noduplicate call, we can still inline as long as
  // inlining this would cause the removal of the caller (so the instruction
  // is not actually duplicated, just moved).
  if (!OnlyOneCallAndLocalLinkage && ContainsNoDuplicateCall)
    return false;

  Threshold += VectorBonus;

  return Cost < Threshold;
}
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
/// \brief Dump stats about this call's analysis.
void CallAnalyzer::dump() {
#define DEBUG_PRINT_STAT(x) dbgs() << "      " #x ": " << x << "\n"
  DEBUG_PRINT_STAT(NumConstantArgs);
  DEBUG_PRINT_STAT(NumConstantOffsetPtrArgs);
  DEBUG_PRINT_STAT(NumAllocaArgs);
  DEBUG_PRINT_STAT(NumConstantPtrCmps);
  DEBUG_PRINT_STAT(NumConstantPtrDiffs);
  DEBUG_PRINT_STAT(NumInstructionsSimplified);
  DEBUG_PRINT_STAT(SROACostSavings);
  DEBUG_PRINT_STAT(SROACostSavingsLost);
  DEBUG_PRINT_STAT(ContainsNoDuplicateCall);
  DEBUG_PRINT_STAT(Cost);
  DEBUG_PRINT_STAT(Threshold);
  DEBUG_PRINT_STAT(VectorBonus);
#undef DEBUG_PRINT_STAT
}
#endif
INITIALIZE_PASS_BEGIN(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                      true, true)
INITIALIZE_AG_DEPENDENCY(TargetTransformInfo)
INITIALIZE_PASS_END(InlineCostAnalysis, "inline-cost", "Inline Cost Analysis",
                    true, true)
1219 InlineCostAnalysis::InlineCostAnalysis() : CallGraphSCCPass(ID) {}
1221 InlineCostAnalysis::~InlineCostAnalysis() {}
void InlineCostAnalysis::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<TargetTransformInfo>();
  CallGraphSCCPass::getAnalysisUsage(AU);
}
bool InlineCostAnalysis::runOnSCC(CallGraphSCC &SCC) {
  TTI = &getAnalysis<TargetTransformInfo>();
  return false;
}
InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, int Threshold) {
  return getInlineCost(CS, CS.getCalledFunction(), Threshold);
}
/// \brief Test whether two functions both have or both lack the given
/// attribute.
static bool attributeMatches(Function *F1, Function *F2,
                             Attribute::AttrKind Attr) {
  return F1->hasFnAttribute(Attr) == F2->hasFnAttribute(Attr);
}
/// \brief Test that there are no attribute conflicts between Caller and Callee
/// that prevent inlining.
static bool functionsHaveCompatibleAttributes(Function *Caller,
                                              Function *Callee) {
  return attributeMatches(Caller, Callee, Attribute::SanitizeAddress) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeMemory) &&
         attributeMatches(Caller, Callee, Attribute::SanitizeThread);
}
InlineCost InlineCostAnalysis::getInlineCost(CallSite CS, Function *Callee,
                                             int Threshold) {
  // Cannot inline indirect calls.
  if (!Callee)
    return llvm::InlineCost::getNever();

  // Calls to functions with always-inline attributes should be inlined
  // whenever possible.
  if (Callee->hasFnAttribute(Attribute::AlwaysInline)) {
    if (isInlineViable(*Callee))
      return llvm::InlineCost::getAlways();
    return llvm::InlineCost::getNever();
  }

  // Never inline functions with conflicting attributes (unless callee has
  // always-inline attribute).
  if (!functionsHaveCompatibleAttributes(CS.getCaller(), Callee))
    return llvm::InlineCost::getNever();

  // Don't inline this call if the caller has the optnone attribute.
  if (CS.getCaller()->hasFnAttribute(Attribute::OptimizeNone))
    return llvm::InlineCost::getNever();

  // Don't inline functions which can be redefined at link-time to mean
  // something else. Don't inline functions marked noinline or call sites
  // marked noinline.
  if (Callee->mayBeOverridden() ||
      Callee->hasFnAttribute(Attribute::NoInline) || CS.isNoInline())
    return llvm::InlineCost::getNever();

  DEBUG(llvm::dbgs() << "      Analyzing call of " << Callee->getName()
        << "...\n");

  CallAnalyzer CA(Callee->getDataLayout(), *TTI, *Callee, Threshold);
  bool ShouldInline = CA.analyzeCall(CS);

  DEBUG(CA.dump());

  // Check if there was a reason to force inlining or no inlining.
  if (!ShouldInline && CA.getCost() < CA.getThreshold())
    return InlineCost::getNever();
  if (ShouldInline && CA.getCost() >= CA.getThreshold())
    return InlineCost::getAlways();

  return llvm::InlineCost::get(CA.getCost(), CA.getThreshold());
}
bool InlineCostAnalysis::isInlineViable(Function &F) {
  bool ReturnsTwice =
    F.getAttributes().hasAttribute(AttributeSet::FunctionIndex,
                                   Attribute::ReturnsTwice);
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    // Disallow inlining of functions which contain an indirect branch.
    if (isa<IndirectBrInst>(BI->getTerminator()))
      return false;

    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
         ++II) {
      CallSite CS(II);
      if (!CS)
        continue;

      // Disallow recursive calls.
      if (&F == CS.getCalledFunction())
        return false;

      // Disallow calls which expose returns-twice to a function not previously
      // attributed as such.
      if (!ReturnsTwice && CS.isCall() &&
          cast<CallInst>(CS.getInstruction())->canReturnTwice())
        return false;
    }
  }

  return true;
}