//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {
extern cl::opt<unsigned> PartialUnrollingThreshold;
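// Note (comment added, not in the original file): PartialUnrollingThreshold is
// assumed here to be the cl::opt defined alongside the target-independent TTI
// implementation; when set on the command line it overrides the subtarget's
// loop micro-op buffer size as the partial-unrolling cap in
// getUnrollingPreferences() below.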
/// \brief Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  typedef TargetTransformInfoImplCRTPBase<T> BaseT;
  typedef TargetTransformInfo TTI;
  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }
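  // Illustrative example (not in the original file): for a <4 x i32> value
  // with both Insert and Extract set, this sums the target's cost for four
  // insertelement and four extractelement operations, one per lane.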
  /// Estimate the cost overhead of SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting an element from its
    // argument plus the cost of inserting it onto the result vector.

    // e.g. <4 x float> has a mask of <0,5,2,7>, i.e. we need to extract from
    // index 0 of the first vector, index 1 of the second vector, index 2 of
    // the first vector and finally index 3 of the second vector, and insert
    // them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }
    return Cost;
  }
  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM)
      : BaseT(TM->getDataLayout()) {}
public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImplBase(const BasicTTIImplBase &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)) {}
  BasicTTIImplBase(BasicTTIImplBase &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
  BasicTTIImplBase &operator=(const BasicTTIImplBase &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    return *this;
  }
  BasicTTIImplBase &operator=(BasicTTIImplBase &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    return *this;
  }
  /// \name Scalar TTI Implementations
  /// @{

  bool hasBranchDivergence() { return false; }
  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(AM, Ty);
  }
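  // For orientation (comment not in the original file): AddrMode describes
  // the addressing expression BaseGV + BaseOffs + BaseReg + Scale*ScaleReg,
  // and the target reports whether such a mode can be folded into its
  // load/store instructions.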
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(AM, Ty);
  }
  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(Ty);
    return getTLI()->isTypeLegal(VT);
  }
  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
  }
  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    if (IID == Intrinsic::cttz) {
      if (getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    if (IID == Intrinsic::ctlz) {
      if (getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
  }
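  // Context (comment added, not in the original file): reporting cttz/ctlz as
  // TCC_Expensive when the target cannot speculate them cheaply discourages
  // passes such as SimplifyCFG from hoisting these intrinsics into paths where
  // they would lower to a more expensive instruction sequence.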
  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }
  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }
  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }
  unsigned getFPOpCost(Type *Ty) {
    // By default, FP instructions are assumed to be no more expensive than
    // integer ones, since they are implemented in HW. Target-specific TTI can
    // override this.
    return TargetTransformInfo::TCC_Basic;
  }
  void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches
    //  - The loop must have less than 40 uops in all executed loop branches

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;
    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {
      BasicBlock *BB = *I;

      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(J);
          if (const Function *F = CS.getCalledFunction()) {
            if (!static_cast<T *>(this)->isLoweredToCall(F))
              continue;
          }

          return;
        }
    }

    // Enable runtime and partial unrolling up to the specified size.
    UP.Partial = UP.Runtime = true;
    UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
  }
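  // Note (comment added, not in the original file): the net effect is that
  // loops whose body fits in the subtarget's loop micro-op buffer, and which
  // contain no real calls, become candidates for runtime and partial
  // unrolling capped at that buffer size.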
  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) { return 1; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getMaxInterleaveFactor() { return 1; }
  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

    bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2 * OpCost;
      return LT.first * 1 * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered then assume
      // that the code is twice as expensive.
      return LT.first * 2 * OpCost;
    }
    // Else, assume that we need to scalarize this op.
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, true, true) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }
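  // Worked example (illustrative, not from the original file): an add on
  // <8 x i32> for a target whose widest legal vector is v4i32 legalizes as
  // LT = {2, v4i32}; the operation is legal, the type splits (LT.first > 1),
  // and the estimate is LT.first * 2 * OpCost = 2 * 2 * 1 = 4.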
  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    if (Kind == TTI::SK_Alternate) {
      return getAltShuffleOverhead(Tp);
    }
    return 1;
  }
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
    }

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return 1;

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {

      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)
        return 0;

      // Just check the op cost. If the operation is legal then assume it costs
      // 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }
    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {

      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return 1;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are converting vectors and the operation is illegal, or
      // if the vectors are legalized to different types, estimate the
      // scalarization costs.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType());

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
    }
    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcast between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                                : 0) +
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                                : 0);

    llvm_unreachable("Unhandled cast");
  }
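  // Worked example (illustrative, not from the original file): a sitofp from
  // <4 x i32> to <4 x float> where SINT_TO_FP on v4f32 is legal returns 1;
  // if the target must expand it, the estimate instead becomes four scalar
  // conversions plus the insert/extract scalarization overhead above.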
  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.
    return 0;
  }
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the operation is scalarized.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(Val->getScalarType());

    return LT.first;
  }
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

    // Assume that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(Src, true);
      if (MemVT.isSimple() && MemVT != MVT::Other) {
        if (Opcode == Instruction::Store)
          LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
        else
          LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
      }
      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }
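  // Illustrative note (not in the original file): a load of <4 x i8> on a
  // target that legalizes it to a wider vector type must either have a legal
  // extending load or pay to build the result vector lane by lane, which is
  // what the scalarization term above models.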
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Type *> Tys) {
    unsigned ISD = 0;
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = 0;
      unsigned ScalarCalls = 1;
      if (RetTy->isVectorTy()) {
        ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
      }
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
        }
      }

      return ScalarCalls + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic call.
    case Intrinsic::sqrt:      ISD = ISD::FSQRT;      break;
    case Intrinsic::exp2:      ISD = ISD::FEXP2;      break;
    case Intrinsic::log10:     ISD = ISD::FLOG10;     break;
    case Intrinsic::log2:      ISD = ISD::FLOG2;      break;
    case Intrinsic::fabs:      ISD = ISD::FABS;       break;
    case Intrinsic::minnum:    ISD = ISD::FMINNUM;    break;
    case Intrinsic::maxnum:    ISD = ISD::FMAXNUM;    break;
    case Intrinsic::copysign:  ISD = ISD::FCOPYSIGN;  break;
    case Intrinsic::floor:     ISD = ISD::FFLOOR;     break;
    case Intrinsic::ceil:      ISD = ISD::FCEIL;      break;
    case Intrinsic::trunc:     ISD = ISD::FTRUNC;     break;
    case Intrinsic::nearbyint: ISD = ISD::FNEARBYINT; break;
    case Intrinsic::rint:      ISD = ISD::FRINT;      break;
    case Intrinsic::round:     ISD = ISD::FROUND;     break;
    case Intrinsic::fmuladd:   ISD = ISD::FMA;        break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return 0;
    case Intrinsic::masked_store:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
    case Intrinsic::masked_load:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    }
    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2;
      return LT.first * 1;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered then assume
      // that the code is twice as expensive.
      return LT.first * 2;
    }

    // If we can't lower fmuladd into an FMA, estimate the cost as a
    // floating-point multiply followed by an add.
    if (IID == Intrinsic::fmuladd)
      return static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);
    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // very expensive.
    if (RetTy->isVectorTy()) {
      unsigned Num = RetTy->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), Tys);
      return 10 * Cost * Num;
    }

    // This is going to be turned into a library call, make it expensive.
    return 10;
  }
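  // Illustrative example (not in the original file): llvm.sqrt on <4 x float>
  // with FSQRT legal for v4f32 costs LT.first * 1 = 1, while a target without
  // a vector sqrt pays 10 * Cost * 4, where Cost is the recursively computed
  // scalar estimate, reflecting four libcalls.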
  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) { return 0; }
  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned ArithCost =
        NumReduxLevels *
        static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
    // Assume the pairwise shuffles add a cost.
    unsigned ShuffleCost =
        NumReduxLevels * (IsPairwise + 1) *
        static_cast<T *>(this)
            ->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
  }

  /// @}
};
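// Worked example (illustrative, not from the original file): a non-pairwise
// add reduction of <8 x float> gives NumReduxLevels = Log2_32(8) = 3, so
// roughly three vector adds plus three subvector-extract shuffles, plus the
// extract overhead term for reading the reduced result out of the vector.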
/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, Function &F);

  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImpl(const BasicTTIImpl &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
  BasicTTIImpl(BasicTTIImpl &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
        TLI(std::move(Arg.TLI)) {}
  BasicTTIImpl &operator=(const BasicTTIImpl &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    ST = RHS.ST;
    TLI = RHS.TLI;
    return *this;
  }
  BasicTTIImpl &operator=(BasicTTIImpl &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    ST = std::move(RHS.ST);
    TLI = std::move(RHS.TLI);
    return *this;
  }
};

} // end namespace llvm

#endif