//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>

using namespace llvm;

static cl::opt<unsigned>
PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
  cl::desc("Threshold for partial unrolling"), cl::Hidden);

#define DEBUG_TYPE "basictti"

namespace {

class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
  const TargetMachine *TM;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  /// Estimate the cost overhead of SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) const;

  const TargetLoweringBase *getTLI() const { return TM->getTargetLowering(); }

public:
  BasicTTI() : ImmutablePass(ID), TM(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetMachine *TM) : ImmutablePass(ID), TM(TM) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  bool hasBranchDivergence() const override;

  /// \name Scalar TTI Implementations
  /// @{

  bool isLegalAddImmediate(int64_t imm) const override;
  bool isLegalICmpImmediate(int64_t imm) const override;
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                             int64_t BaseOffset, bool HasBaseReg,
                             int64_t Scale) const override;
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                           int64_t BaseOffset, bool HasBaseReg,
                           int64_t Scale) const override;
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTypeLegal(Type *Ty) const override;
  unsigned getJumpBufAlignment() const override;
  unsigned getJumpBufSize() const override;
  bool shouldBuildLookupTables() const override;
  bool haveFastSqrt(Type *Ty) const override;
  void getUnrollingPreferences(Loop *L,
                               UnrollingPreferences &UP) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCFInstrCost(unsigned Opcode) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;
  unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                 ArrayRef<Type*> Tys) const override;
  unsigned getNumberOfParts(Type *Tp) const override;
  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;
  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwise) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true, false)
char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetMachine *TM) {
  return new BasicTTI(TM);
}
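
// By default, model a target whose branches are uniform (no divergence);
// GPU-like targets override this to return true so that divergence-aware
// optimizations kick in.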
bool BasicTTI::hasBranchDivergence() const { return false; }

bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return getTLI()->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return getTLI()->isLegalICmpImmediate(imm);
}

bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->isLegalAddressingMode(AM, Ty);
}
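
// Follows the TargetLowering convention: a non-negative result is the cost of
// using Scale in this addressing mode, and a negative result means the
// addressing mode is not supported.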
int BasicTTI::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->getScalingFactorCost(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return getTLI()->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = getTLI()->getValueType(Ty);
  return getTLI()->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return getTLI()->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return getTLI()->getJumpBufSize();
}

bool BasicTTI::shouldBuildLookupTables() const {
  const TargetLoweringBase *TLI = getTLI();
  return TLI->supportJumpTables() &&
      (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
       TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}

bool BasicTTI::haveFastSqrt(Type *Ty) const {
  const TargetLoweringBase *TLI = getTLI();
  EVT VT = TLI->getValueType(Ty);
  return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}

void BasicTTI::getUnrollingPreferences(Loop *L,
                                       UnrollingPreferences &UP) const {
  // This unrolling functionality is target independent, but to provide some
  // motivation for its intended use, for x86:

  // According to the Intel 64 and IA-32 Architectures Optimization Reference
  // Manual, Intel Core models and later have a loop stream detector
  // (and associated uop queue) that can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have no more than 4 (8 for Nehalem and later) branches
  //    taken, and none of them may be calls.
  //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

  // According to the Software Optimization Guide for AMD Family 15h
  // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
  // and loop buffer which can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have fewer than 16 branches.
  //  - The loop must have fewer than 40 uops in all executed loop branches.

  // The number of taken branches in a loop is hard to estimate here, and
  // benchmarking has revealed that it is better not to be conservative when
  // estimating the branch count. As a result, we'll ignore the branch limits
  // until someone finds a case where it matters in practice.

  unsigned MaxOps;
  const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>();
  if (PartialUnrollingThreshold.getNumOccurrences() > 0)
    MaxOps = PartialUnrollingThreshold;
  else if (ST->getSchedModel()->LoopMicroOpBufferSize > 0)
    MaxOps = ST->getSchedModel()->LoopMicroOpBufferSize;
  else
    return;

  // Scan the loop: don't unroll loops with calls.
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;

    for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
      if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
        ImmutableCallSite CS(J);
        if (const Function *F = CS.getCalledFunction()) {
          if (!TopTTI->isLoweredToCall(F))
            continue;
        }

        return;
      }
  }

  // Enable runtime and partial unrolling up to the specified size.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
}

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
//
//===----------------------------------------------------------------------===//

unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
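
// Conservative defaults for targets that don't override the register queries:
// a single register, 32 bits wide, and no unrolling (factor of 1).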
unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned BasicTTI::getMaximumUnrollFactor() const {
  return 1;
}

unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const {
  // Check if any of the operands are vector operands.
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
  // Assume that floating point arithmetic operations cost twice as much as
  // integer operations.
  unsigned OpCost = (IsFloat ? 2 : 1);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2 * OpCost;
    return LT.first * 1 * OpCost;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume
    // that the code is twice as expensive.
    return LT.first * 2 * OpCost;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return OpCost;
}

unsigned BasicTTI::getAltShuffleOverhead(Type *Ty) const {
  assert(Ty->isVectorTy() && "Can only shuffle vectors");
  unsigned Cost = 0;
  // Shuffle cost is equal to the cost of extracting each element from its
  // source argument plus the cost of inserting it into the result vector.

  // e.g. an SK_Alternate shuffle of two <4 x float> vectors has a mask of
  // <0,5,2,7>, i.e. we need to extract index 0 of the first vector, index 1 of
  // the second vector, index 2 of the first vector and index 3 of the second
  // vector, and insert them at indices <0,1,2,3> of the result vector.
  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }
  return Cost;
}

unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  if (Kind == SK_Alternate) {
    return getAltShuffleOverhead(Tp);
  }
  return 1;
}
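
// Cost model for casts: free and no-op conversions are recognized first, then
// legal (or promoted) casts, then the scalar and vector-to-vector cases, with
// full scalarization as the fallback.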
unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

  // Check for NOOP conversions.
  if (SrcLT.first == DstLT.first &&
      SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

    // Bitcasts between types that are legalized to the same type are free.
    if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
      return 0;
  }

  if (Opcode == Instruction::Trunc &&
      TLI->isTruncateFree(SrcLT.second, DstLT.second))
    return 0;

  if (Opcode == Instruction::ZExt &&
      TLI->isZExtFree(SrcLT.second, DstLT.second))
    return 0;

  // If the cast is marked as legal (or promote) then assume low cost.
  if (SrcLT.first == DstLT.first &&
      TLI->isOperationLegalOrPromote(ISD, DstLT.second))
    return 1;

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    // Just check the op cost. If the operation is legal then assume it costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Assume that Zext is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that sext is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it costs
      // 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or
    // if the vectors are legalized to different types, estimate the
    // scalarization costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions. This
  // is where we handle bitcasts between vectors and scalars. We need to assume
  // that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
    return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true) : 0)
         + (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false) : 0);

  llvm_unreachable("Unhandled cast");
}

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first * 1;
  }

  // Otherwise, assume that the compare or select is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}
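
// The cost of a single insert/extract is approximated by the legalization
// cost of the element's scalar type.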
unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  std::pair<unsigned, MVT> LT =
      getTLI()->getTypeLegalizationCost(Val->getScalarType());

  return LT.first;
}

unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

  // Assume that all loads of legal types cost 1.
  unsigned Cost = LT.first;

  if (Src->isVectorTy() &&
      Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
    // This is a vector load that legalizes to a larger type than the vector
    // itself. Unless the corresponding extending load or truncating store is
    // legal, this will scalarize.
    TargetLowering::LegalizeAction LA = TargetLowering::Expand;
    EVT MemVT = getTLI()->getValueType(Src, true);
    if (MemVT.isSimple() && MemVT != MVT::Other) {
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, MemVT.getSimpleVT());
    }

    if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
      // This is a vector load/store for some illegal type that is scalarized.
      // We must account for the cost of building or decomposing the vector.
      Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                       Opcode == Instruction::Store);
    }
  }

  return Cost;
}
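
// Intrinsics with a known ISD counterpart are costed like the corresponding
// instruction; anything else is assumed to be scalarized into individual
// calls.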
unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  unsigned ISD = 0;
  switch (IID) {
  default: {
    // Assume that we need to scalarize this intrinsic.
    unsigned ScalarizationCost = 0;
    unsigned ScalarCalls = 1;
    if (RetTy->isVectorTy()) {
      ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
    }
    for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      if (Tys[i]->isVectorTy()) {
        ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
        ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
      }
    }

    return ScalarCalls + ScalarizationCost;
  }
  // Look for intrinsics that can be lowered directly or turned into a scalar
  // intrinsic call.
  case Intrinsic::sqrt:      ISD = ISD::FSQRT;      break;
  case Intrinsic::sin:       ISD = ISD::FSIN;       break;
  case Intrinsic::cos:       ISD = ISD::FCOS;       break;
  case Intrinsic::exp:       ISD = ISD::FEXP;       break;
  case Intrinsic::exp2:      ISD = ISD::FEXP2;      break;
  case Intrinsic::log:       ISD = ISD::FLOG;       break;
  case Intrinsic::log10:     ISD = ISD::FLOG10;     break;
  case Intrinsic::log2:      ISD = ISD::FLOG2;      break;
  case Intrinsic::fabs:      ISD = ISD::FABS;       break;
  case Intrinsic::copysign:  ISD = ISD::FCOPYSIGN;  break;
  case Intrinsic::floor:     ISD = ISD::FFLOOR;     break;
  case Intrinsic::ceil:      ISD = ISD::FCEIL;      break;
  case Intrinsic::trunc:     ISD = ISD::FTRUNC;     break;
  case Intrinsic::nearbyint: ISD = ISD::FNEARBYINT; break;
  case Intrinsic::rint:      ISD = ISD::FRINT;      break;
  case Intrinsic::round:     ISD = ISD::FROUND;     break;
  case Intrinsic::pow:       ISD = ISD::FPOW;       break;
  case Intrinsic::fma:       ISD = ISD::FMA;        break;
  case Intrinsic::fmuladd:   ISD = ISD::FMA;        break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    return 0;
  }

  const TargetLoweringBase *TLI = getTLI();
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume
    // that the code is twice as expensive.
    return LT.first * 2;
  }

  // If we can't lower fmuladd into an FMA estimate the cost as a floating
  // point mul followed by an add.
  if (IID == Intrinsic::fmuladd)
    return TopTTI->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
           TopTTI->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

  // Else, assume that we need to scalarize this intrinsic. For math builtins
  // this will emit a costly libcall, adding call overhead and spills. Make it
  // very expensive.
  if (RetTy->isVectorTy()) {
    unsigned Num = RetTy->getVectorNumElements();
    unsigned Cost = TopTTI->getIntrinsicInstrCost(IID, RetTy->getScalarType(),
                                                  Tys);
    return 10 * Cost * Num;
  }

  // This is going to be turned into a library call, make it expensive.
  return 10;
}

unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
  return LT.first;
}

unsigned BasicTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  return 0;
}
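
// A reduction over an <N x T> vector is modeled as a log2(N)-level ladder
// (e.g. 8 -> 4 -> 2 -> 1 for N = 8): each level costs one vector arithmetic
// op and one shuffle (two for pairwise reductions), plus the overhead of
// extracting the scalar results from the vector.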
unsigned BasicTTI::getReductionCost(unsigned Opcode, Type *Ty,
                                    bool IsPairwise) const {
  assert(Ty->isVectorTy() && "Expect a vector type");
  unsigned NumVecElts = Ty->getVectorNumElements();
  unsigned NumReduxLevels = Log2_32(NumVecElts);
  unsigned ArithCost =
      NumReduxLevels * TopTTI->getArithmeticInstrCost(Opcode, Ty);
  // Assume the pairwise shuffles add a cost.
  unsigned ShuffleCost =
      NumReduxLevels * (IsPairwise + 1) *
      TopTTI->getShuffleCost(SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
  return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
}