X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FAnalysis%2FCodeMetrics.cpp;h=8cda01a24c0d4a3ea72df95b8732eeff42a50d63;hb=5be77762a3aa434ee877b0a03b98b5c3a7571918;hp=6c93f78629fd0606a028ca6afb2ac747d8221592;hpb=9b081d9691f4509af58919db5760235ca319c9da;p=oota-llvm.git

diff --git a/lib/Analysis/CodeMetrics.cpp b/lib/Analysis/CodeMetrics.cpp
index 6c93f78629f..8cda01a24c0 100644
--- a/lib/Analysis/CodeMetrics.cpp
+++ b/lib/Analysis/CodeMetrics.cpp
@@ -12,73 +12,24 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/CodeMetrics.h"
-#include "llvm/Function.h"
+#include "llvm/Analysis/TargetTransformInfo.h"
+#include "llvm/IR/DataLayout.h"
+#include "llvm/IR/Function.h"
+#include "llvm/IR/IntrinsicInst.h"
 #include "llvm/Support/CallSite.h"
-#include "llvm/IntrinsicInst.h"
-#include "llvm/Target/TargetData.h"
 
 using namespace llvm;
 
-/// callIsSmall - If a call is likely to lower to a single target instruction,
-/// or is otherwise deemed small return true.
-/// TODO: Perhaps calls like memcpy, strcpy, etc?
-bool llvm::callIsSmall(const Function *F) {
-  if (!F) return false;
-
-  if (F->hasLocalLinkage()) return false;
-
-  if (!F->hasName()) return false;
-
-  StringRef Name = F->getName();
-
-  // These will all likely lower to a single selection DAG node.
-  if (Name == "copysign" || Name == "copysignf" || Name == "copysignl" ||
-      Name == "fabs" || Name == "fabsf" || Name == "fabsl" ||
-      Name == "sin" || Name == "sinf" || Name == "sinl" ||
-      Name == "cos" || Name == "cosf" || Name == "cosl" ||
-      Name == "sqrt" || Name == "sqrtf" || Name == "sqrtl" )
-    return true;
-
-  // These are all likely to be optimized into something smaller.
-  if (Name == "pow" || Name == "powf" || Name == "powl" ||
-      Name == "exp2" || Name == "exp2l" || Name == "exp2f" ||
-      Name == "floor" || Name == "floorf" || Name == "ceil" ||
-      Name == "round" || Name == "ffs" || Name == "ffsl" ||
-      Name == "abs" || Name == "labs" || Name == "llabs")
-    return true;
-
-  return false;
-}
-
 /// analyzeBasicBlock - Fill in the current structure with information gleaned
 /// from the specified block.
 void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
-                                    const TargetData *TD) {
+                                    const TargetTransformInfo &TTI) {
   ++NumBlocks;
   unsigned NumInstsBeforeThisBB = NumInsts;
   for (BasicBlock::const_iterator II = BB->begin(), E = BB->end();
        II != E; ++II) {
-    if (isa<PHINode>(II)) continue;           // PHI nodes don't count.
-
     // Special handling for calls.
     if (isa<CallInst>(II) || isa<InvokeInst>(II)) {
-      if (const IntrinsicInst *IntrinsicI = dyn_cast<IntrinsicInst>(II)) {
-        switch (IntrinsicI->getIntrinsicID()) {
-        default: break;
-        case Intrinsic::dbg_declare:
-        case Intrinsic::dbg_value:
-        case Intrinsic::invariant_start:
-        case Intrinsic::invariant_end:
-        case Intrinsic::lifetime_start:
-        case Intrinsic::lifetime_end:
-        case Intrinsic::objectsize:
-        case Intrinsic::ptr_annotation:
-        case Intrinsic::var_annotation:
-          // These intrinsics don't count as size.
-          continue;
-        }
-      }
-
       ImmutableCallSite CS(cast<Instruction>(II));
 
       if (const Function *F = CS.getCalledFunction()) {
@@ -94,12 +45,10 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
         // for that case.
         if (F == BB->getParent())
           isRecursive = true;
-      }
-
-      if (!isa<IntrinsicInst>(II) && !callIsSmall(CS.getCalledFunction())) {
-        // Each argument to a call takes on average one instruction to set up.
-        NumInsts += CS.arg_size();
 
+        if (TTI.isLoweredToCall(F))
+          ++NumCalls;
+      } else {
         // We don't want inline asm to count as a call - that would prevent loop
         // unrolling. The argument setup cost is still real, though.
         if (!isa<InlineAsm>(CS.getCalledValue()))
           ++NumCalls;
       }
     }
@@ -115,29 +64,15 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
     if (isa<ExtractElementInst>(II) || II->getType()->isVectorTy())
       ++NumVectorInsts;
 
-    if (const CastInst *CI = dyn_cast<CastInst>(II)) {
-      // Noop casts, including ptr <-> int, don't count.
-      if (CI->isLosslessCast() || isa<IntToPtrInst>(CI) ||
-          isa<PtrToIntInst>(CI))
-        continue;
-      // trunc to a native type is free (assuming the target has compare and
-      // shift-right of the same width).
-      if (isa<TruncInst>(CI) && TD &&
-          TD->isLegalInteger(TD->getTypeSizeInBits(CI->getType())))
-        continue;
-      // Result of a cmp instruction is often extended (to be used by other
-      // cmp instructions, logical or return instructions). These are usually
-      // nop on most sane targets.
-      if (isa<CmpInst>(CI->getOperand(0)))
-        continue;
-    } else if (const GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(II)){
-      // If a GEP has all constant indices, it will probably be folded with
-      // a load/store.
-      if (GEPI->hasAllConstantIndices())
-        continue;
-    }
+    if (const CallInst *CI = dyn_cast<CallInst>(II))
+      if (CI->hasFnAttr(Attribute::NoDuplicate))
+        notDuplicatable = true;
 
-    ++NumInsts;
+    if (const InvokeInst *InvI = dyn_cast<InvokeInst>(II))
+      if (InvI->hasFnAttr(Attribute::NoDuplicate))
+        notDuplicatable = true;
+
+    NumInsts += TTI.getUserCost(&*II);
   }
 
   if (isa<ReturnInst>(BB->getTerminator()))
     ++NumRets;
@@ -154,23 +89,8 @@ void CodeMetrics::analyzeBasicBlock(const BasicBlock *BB,
   // if someone is using a blockaddress without an indirectbr, and that
   // reference somehow ends up in another function or global, we probably
   // don't want to inline this function.
-  if (isa<IndirectBrInst>(BB->getTerminator()))
-    containsIndirectBr = true;
+  notDuplicatable |= isa<IndirectBrInst>(BB->getTerminator());
 
   // Remember NumInsts for this BB.
   NumBBInsts[BB] = NumInsts - NumInstsBeforeThisBB;
 }
-
-void CodeMetrics::analyzeFunction(Function *F, const TargetData *TD) {
-  // If this function contains a call that "returns twice" (e.g., setjmp or
-  // _setjmp) and it isn't marked with "returns twice" itself, never inline it.
-  // This is a hack because we depend on the user marking their local variables
-  // as volatile if they are live across a setjmp call, and they probably
-  // won't do this in callers.
-  exposesReturnsTwice = F->callsFunctionThatReturnsTwice() &&
-    !F->hasFnAttr(Attribute::ReturnsTwice);
-
-  // Look at the size of the callee.
-  for (Function::const_iterator BB = F->begin(), E = F->end(); BB != E; ++BB)
-    analyzeBasicBlock(&*BB, TD);
-}
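
Editor's note: since this patch deletes analyzeFunction(), callers must now drive the per-block analysis themselves, supplying a TargetTransformInfo. Below is a minimal caller sketch, assuming the post-patch CodeMetrics interface shown in the diff; the helper name isFunctionDuplicatable is hypothetical and is not part of the patch.

// Sketch only (not part of this patch): accumulate CodeMetrics over a
// function's blocks using the new TTI-based interface, mirroring the loop
// that the deleted analyzeFunction() used to perform.
#include "llvm/Analysis/CodeMetrics.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/Function.h"

using namespace llvm;

// Hypothetical helper name, for illustration only.
static bool isFunctionDuplicatable(const Function &F,
                                   const TargetTransformInfo &TTI) {
  CodeMetrics Metrics;
  for (Function::const_iterator BB = F.begin(), E = F.end(); BB != E; ++BB)
    Metrics.analyzeBasicBlock(&*BB, TTI);
  // After this patch, notDuplicatable folds in indirectbr terminators
  // (formerly the separate containsIndirectBr flag) as well as calls marked
  // with the noduplicate attribute, while NumInsts is accumulated through
  // TTI.getUserCost() instead of the removed ad-hoc size heuristics.
  return !Metrics.notDuplicatable;
}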