//===- BasicTTIImpl.h -------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file provides a helper that implements much of the TTI interface in
/// terms of the target-independent code generator and TargetLowering
/// interfaces.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_CODEGEN_BASICTTIIMPL_H
#define LLVM_CODEGEN_BASICTTIIMPL_H

#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfoImpl.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"

namespace llvm {

extern cl::opt<unsigned> PartialUnrollingThreshold;

/// \brief Base class which can be used to help build a TTI implementation.
///
/// This class provides as much implementation of the TTI interface as is
/// possible using the target independent parts of the code generator.
///
/// In order to subclass it, your class must implement a getST() method to
/// return the subtarget, and a getTLI() method to return the target lowering.
/// We need these methods implemented in the derived class so that this class
/// doesn't have to duplicate storage for them.
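///
/// For example, a derived class is expected to be wired up roughly as follows
/// (a minimal sketch, not a real target: "Foo" and its subtarget, lowering,
/// and target-machine classes are hypothetical; only the getST()/getTLI()
/// contract is prescribed by this class):
///
/// \code
///   class FooTTIImpl : public BasicTTIImplBase<FooTTIImpl> {
///     typedef BasicTTIImplBase<FooTTIImpl> BaseT;
///     friend class BasicTTIImplBase<FooTTIImpl>;
///
///     const FooSubtarget *ST;
///     const FooTargetLowering *TLI;
///
///     // Satisfy the CRTP base's local query methods.
///     const TargetSubtargetInfo *getST() const { return ST; }
///     const TargetLoweringBase *getTLI() const { return TLI; }
///
///   public:
///     explicit FooTTIImpl(const FooTargetMachine *TM)
///         : BaseT(TM), ST(TM->getSubtargetImpl()),
///           TLI(ST->getTargetLowering()) {}
///   };
/// \endcode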
template <typename T>
class BasicTTIImplBase : public TargetTransformInfoImplCRTPBase<T> {
private:
  typedef TargetTransformInfoImplCRTPBase<T> BaseT;
  typedef TargetTransformInfo TTI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
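  ///
  /// For example, fully scalarizing an operation that both produces and
  /// consumes a <4 x float> is priced as four inserts plus four extracts
  /// (a sketch; the per-element costs come from getVectorInstrCost):
  ///
  /// \code
  ///   // getScalarizationOverhead(V4F32Ty, /*Insert=*/true, /*Extract=*/true)
  ///   //   == sum over i of InsertElement(i) + ExtractElement(i) costs.
  /// \endcode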
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
    assert(Ty->isVectorTy() && "Can only scalarize vectors");
    unsigned Cost = 0;

    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      if (Insert)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      if (Extract)
        Cost += static_cast<T *>(this)
                    ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }

    return Cost;
  }

  /// Estimate the cost overhead of SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) {
    assert(Ty->isVectorTy() && "Can only shuffle vectors");
    unsigned Cost = 0;
    // Shuffle cost is equal to the cost of extracting an element from its
    // argument plus the cost of inserting it onto the result vector.
    //
    // e.g. for <4 x float> a mask of <0,5,2,7> means we need to extract from
    // index 0 of the first vector, index 1 of the second vector, index 2 of
    // the first vector, and finally index 3 of the second vector, and insert
    // them at indices <0,1,2,3> of the result vector.
    for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::InsertElement, Ty, i);
      Cost += static_cast<T *>(this)
                  ->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
    }
    return Cost;
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetSubtargetInfo *getST() const {
    return static_cast<const T *>(this)->getST();
  }

  /// \brief Local query method delegates up to T which *must* implement this!
  const TargetLoweringBase *getTLI() const {
    return static_cast<const T *>(this)->getTLI();
  }

protected:
  explicit BasicTTIImplBase(const TargetMachine *TM)
      : BaseT(TM->getDataLayout()) {}

public:
  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImplBase(const BasicTTIImplBase &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)) {}
  BasicTTIImplBase(BasicTTIImplBase &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))) {}
  BasicTTIImplBase &operator=(const BasicTTIImplBase &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    return *this;
  }
  BasicTTIImplBase &operator=(BasicTTIImplBase &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    return *this;
  }

  /// \name Scalar TTI Implementations
  /// @{

  bool hasBranchDivergence() { return false; }

  bool isSourceOfDivergence(const Value *V) { return false; }

  bool isLegalAddImmediate(int64_t imm) {
    return getTLI()->isLegalAddImmediate(imm);
  }

  bool isLegalICmpImmediate(int64_t imm) {
    return getTLI()->isLegalICmpImmediate(imm);
  }
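
  /// Query TLI for the legality of an addressing mode of the form
  /// BaseGV + BaseOffset + BaseReg + Scale*IndexReg. For example
  /// (illustrative only; TTI stands for a TargetTransformInfo wrapping this
  /// implementation and Int32Ty for an i32 type):
  /// \code
  ///   // Can a load of p[i + 4], with p an i32*, fold into one address,
  ///   // i.e. base register + 4*index + byte offset 16?
  ///   bool OK = TTI.isLegalAddressingMode(Int32Ty, /*BaseGV=*/nullptr,
  ///                                       /*BaseOffset=*/16,
  ///                                       /*HasBaseReg=*/true, /*Scale=*/4);
  /// \endcode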
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                             bool HasBaseReg, int64_t Scale) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->isLegalAddressingMode(AM, Ty);
  }

  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV, int64_t BaseOffset,
                           bool HasBaseReg, int64_t Scale) {
    TargetLoweringBase::AddrMode AM;
    AM.BaseGV = BaseGV;
    AM.BaseOffs = BaseOffset;
    AM.HasBaseReg = HasBaseReg;
    AM.Scale = Scale;
    return getTLI()->getScalingFactorCost(AM, Ty);
  }

  bool isTruncateFree(Type *Ty1, Type *Ty2) {
    return getTLI()->isTruncateFree(Ty1, Ty2);
  }

  bool isProfitableToHoist(Instruction *I) {
    return getTLI()->isProfitableToHoist(I);
  }

  bool isTypeLegal(Type *Ty) {
    EVT VT = getTLI()->getValueType(Ty);
    return getTLI()->isTypeLegal(VT);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<const Value *> Arguments) {
    return BaseT::getIntrinsicCost(IID, RetTy, Arguments);
  }

  unsigned getIntrinsicCost(Intrinsic::ID IID, Type *RetTy,
                            ArrayRef<Type *> ParamTys) {
    if (IID == Intrinsic::cttz) {
      if (getTLI()->isCheapToSpeculateCttz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    if (IID == Intrinsic::ctlz) {
      if (getTLI()->isCheapToSpeculateCtlz())
        return TargetTransformInfo::TCC_Basic;
      return TargetTransformInfo::TCC_Expensive;
    }

    return BaseT::getIntrinsicCost(IID, RetTy, ParamTys);
  }

  unsigned getJumpBufAlignment() { return getTLI()->getJumpBufAlignment(); }

  unsigned getJumpBufSize() { return getTLI()->getJumpBufSize(); }

  bool shouldBuildLookupTables() {
    const TargetLoweringBase *TLI = getTLI();
    return TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
           TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other);
  }

  bool haveFastSqrt(Type *Ty) {
    const TargetLoweringBase *TLI = getTLI();
    EVT VT = TLI->getValueType(Ty);
    return TLI->isTypeLegal(VT) &&
           TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
  }

  unsigned getFPOpCost(Type *Ty) {
    // By default, FP instructions are no more expensive since they are
    // implemented in HW. Target-specific TTI can override this.
    return TargetTransformInfo::TCC_Basic;
  }
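
  /// Generic cost of a single IR operation, refined here for casts that TLI
  /// reports as free. For example (whether the cast is free depends on the
  /// target; a 64-bit target is assumed purely for illustration):
  /// \code
  ///   // trunc i64 -> i32 where TLI->isTruncateFree(i64, i32) holds:
  ///   unsigned C = TTI.getOperationCost(Instruction::Trunc, Int32Ty, Int64Ty);
  ///   // C == TargetTransformInfo::TCC_Free
  /// \endcode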
  unsigned getOperationCost(unsigned Opcode, Type *Ty, Type *OpTy) {
    const TargetLoweringBase *TLI = getTLI();
    switch (Opcode) {
    default:
      break;
    case Instruction::Trunc: {
      if (TLI->isTruncateFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }
    case Instruction::ZExt: {
      if (TLI->isZExtFree(OpTy, Ty))
        return TargetTransformInfo::TCC_Free;
      return TargetTransformInfo::TCC_Basic;
    }
    }

    return BaseT::getOperationCost(Opcode, Ty, OpTy);
  }

  void getUnrollingPreferences(Loop *L, TTI::UnrollingPreferences &UP) {
    // This unrolling functionality is target independent, but to provide some
    // motivation for its intended use, for x86:

    // According to the Intel 64 and IA-32 Architectures Optimization Reference
    // Manual, Intel Core models and later have a loop stream detector (and
    // associated uop queue) that can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have no more than 4 (8 for Nehalem and later) branches
    //    taken, and none of them may be calls.
    //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

    // According to the Software Optimization Guide for AMD Family 15h
    // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
    // and loop buffer which can benefit from partial unrolling.
    // The relevant requirements are:
    //  - The loop must have fewer than 16 branches.
    //  - The loop must have fewer than 40 uops in all executed loop branches.

    // The number of taken branches in a loop is hard to estimate here, and
    // benchmarking has revealed that it is better not to be conservative when
    // estimating the branch count. As a result, we'll ignore the branch limits
    // until someone finds a case where it matters in practice.

    unsigned MaxOps;
    const TargetSubtargetInfo *ST = getST();
    if (PartialUnrollingThreshold.getNumOccurrences() > 0)
      MaxOps = PartialUnrollingThreshold;
    else if (ST->getSchedModel().LoopMicroOpBufferSize > 0)
      MaxOps = ST->getSchedModel().LoopMicroOpBufferSize;
    else
      return;

    // Scan the loop: don't unroll loops with calls.
    for (Loop::block_iterator I = L->block_begin(), E = L->block_end(); I != E;
         ++I) {
      BasicBlock *BB = *I;

      for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
        if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
          ImmutableCallSite CS(J);
          if (const Function *F = CS.getCalledFunction()) {
            if (!static_cast<T *>(this)->isLoweredToCall(F))
              continue;
          }

          return;
        }
    }

    // Enable runtime and partial unrolling up to the specified size.
    UP.Partial = UP.Runtime = true;
    UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
  }

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) { return 1; }

  unsigned getRegisterBitWidth(bool Vector) { return 32; }

  unsigned getMaxInterleaveFactor(unsigned VF) { return 1; }
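
  /// Estimate the cost of an arithmetic instruction from its legalized type.
  /// For example (a sketch assuming a target with 128-bit vector registers on
  /// which FADD is legal; V8F32Ty is a stand-in for <8 x float>):
  /// \code
  ///   // fadd on <8 x float>: LT = {2, v4f32} (the type splits in two),
  ///   // OpCost = 2 (floating point), so the estimate is
  ///   //   LT.first * 2 * OpCost == 8.
  ///   unsigned C = TTI.getArithmeticInstrCost(Instruction::FAdd, V8F32Ty);
  /// \endcode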
  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None) {
    // Check if any of the operands are vector operands.
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

    bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
    // Assume that floating point arithmetic operations cost twice as much as
    // integer operations.
    unsigned OpCost = (IsFloat ? 2 : 1);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2 * OpCost;
      return LT.first * 1 * OpCost;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered then assume
      // that the code is twice as expensive.
      return LT.first * 2 * OpCost;
    }

    // Else, assume that we need to scalarize this op.
    if (Ty->isVectorTy()) {
      unsigned Num = Ty->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)
                          ->getArithmeticInstrCost(Opcode, Ty->getScalarType());
      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Ty, true, true) + Num * Cost;
    }

    // We don't know anything about this scalar instruction.
    return OpCost;
  }

  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp) {
    if (Kind == TTI::SK_Alternate) {
      return getAltShuffleOverhead(Tp);
    }
    return 1;
  }
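
  /// Estimate the cost of a cast from the legalized source and destination
  /// types. For example (a sketch assuming 128-bit vector registers; the
  /// V16I*Ty names are stand-ins for the corresponding vector types):
  /// \code
  ///   // sext <16 x i8> to <16 x i32>: the source fits in one register but
  ///   // the destination splits into four, so no fast path applies and the
  ///   // cast is priced as 16 scalar sexts plus insert/extract overhead:
  ///   //   16 * ScalarCost + getScalarizationOverhead(Dst, true, true)
  ///   unsigned C = TTI.getCastInstrCost(Instruction::SExt, V16I32Ty, V16I8Ty);
  /// \endcode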
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
    std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

    // Check for NOOP conversions.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Bitcasts between types that are legalized to the same type are free.
      if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
        return 0;
    }

    if (Opcode == Instruction::Trunc &&
        TLI->isTruncateFree(SrcLT.second, DstLT.second))
      return 0;

    if (Opcode == Instruction::ZExt &&
        TLI->isZExtFree(SrcLT.second, DstLT.second))
      return 0;

    // If the cast is marked as legal (or promote) then assume low cost.
    if (SrcLT.first == DstLT.first &&
        TLI->isOperationLegalOrPromote(ISD, DstLT.second))
      return 1;

    // Handle scalar conversions.
    if (!Src->isVectorTy() && !Dst->isVectorTy()) {

      // Scalar bitcasts are usually free.
      if (Opcode == Instruction::BitCast)
        return 0;

      // Just check the op cost. If the operation is legal then assume it
      // costs 1.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return 1;

      // Assume that illegal scalar instructions are expensive.
      return 4;
    }

    // Check vector-to-vector casts.
    if (Dst->isVectorTy() && Src->isVectorTy()) {

      // If the cast is between same-sized registers, then the check is simple.
      if (SrcLT.first == DstLT.first &&
          SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

        // Assume that Zext is done using AND.
        if (Opcode == Instruction::ZExt)
          return 1;

        // Assume that sext is done using SHL and SRA.
        if (Opcode == Instruction::SExt)
          return 2;

        // Just check the op cost. If the operation is legal then assume it
        // costs 1 and multiply by the type-legalization overhead.
        if (!TLI->isOperationExpand(ISD, DstLT.second))
          return SrcLT.first * 1;
      }

      // If we are converting vectors and the operation is illegal, or
      // if the vectors are legalized to different types, estimate the
      // scalarization costs.
      unsigned Num = Dst->getVectorNumElements();
      unsigned Cost = static_cast<T *>(this)->getCastInstrCost(
          Opcode, Dst->getScalarType(), Src->getScalarType());

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(Dst, true, true) + Num * Cost;
    }

    // We already handled vector-to-vector and scalar-to-scalar conversions.
    // This is where we handle bitcast between vectors and scalars. We need to
    // assume that the conversion is scalarized in one way or another.
    if (Opcode == Instruction::BitCast)
      // Illegal bitcasts are done by storing and loading from a stack slot.
      return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true)
                                : 0) +
             (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false)
                                : 0);

    llvm_unreachable("Unhandled cast");
  }

  unsigned getCFInstrCost(unsigned Opcode) {
    // Branches are assumed to be predicted.
    return 0;
  }
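
  /// For example (a sketch; whether VSELECT expands is up to the target):
  /// \code
  ///   // A select with a <4 x i1> condition is costed as ISD::VSELECT on
  ///   // the legalized <4 x float> type; if that is not expanded, the cost
  ///   // is simply the type-legalization factor LT.first.
  ///   unsigned C = TTI.getCmpSelInstrCost(Instruction::Select, V4F32Ty,
  ///                                       V4I1Ty);
  /// \endcode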
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
    const TargetLoweringBase *TLI = getTLI();
    int ISD = TLI->InstructionOpcodeToISD(Opcode);
    assert(ISD && "Invalid opcode");

    // Selects on vectors are actually vector selects.
    if (ISD == ISD::SELECT) {
      assert(CondTy && "CondTy must exist");
      if (CondTy->isVectorTy())
        ISD = ISD::VSELECT;
    }

    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

    if (!(ValTy->isVectorTy() && !LT.second.isVector()) &&
        !TLI->isOperationExpand(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1. Multiply
      // by the type-legalization overhead.
      return LT.first * 1;
    }

    // Otherwise, assume that the cast is scalarized.
    if (ValTy->isVectorTy()) {
      unsigned Num = ValTy->getVectorNumElements();
      if (CondTy)
        CondTy = CondTy->getScalarType();
      unsigned Cost = static_cast<T *>(this)->getCmpSelInstrCost(
          Opcode, ValTy->getScalarType(), CondTy);

      // Return the cost of multiple scalar invocations plus the cost of
      // inserting and extracting the values.
      return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
    }

    // Unknown scalar opcode.
    return 1;
  }

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
    std::pair<unsigned, MVT> LT =
        getTLI()->getTypeLegalizationCost(Val->getScalarType());

    return LT.first;
  }
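
  /// For example (a sketch; V3I32Ty is a stand-in for <3 x i32>):
  /// \code
  ///   // <3 x i32> (96 bits) legalizes to the wider v4i32; unless the
  ///   // matching EXTLOAD is Legal or Custom, the load is assumed to be
  ///   // scalarized, adding the cost of inserting the loaded elements:
  ///   unsigned C = TTI.getMemoryOpCost(Instruction::Load, V3I32Ty,
  ///                                    /*Alignment=*/4, /*AddressSpace=*/0);
  /// \endcode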
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) {
    assert(!Src->isVoidTy() && "Invalid type");
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

    // Assuming that all loads of legal types cost 1.
    unsigned Cost = LT.first;

    if (Src->isVectorTy() &&
        Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
      // This is a vector load that legalizes to a larger type than the vector
      // itself. Unless the corresponding extending load or truncating store is
      // legal, then this will scalarize.
      TargetLowering::LegalizeAction LA = TargetLowering::Expand;
      EVT MemVT = getTLI()->getValueType(Src, true);
      if (MemVT.isSimple() && MemVT != MVT::Other) {
        if (Opcode == Instruction::Store)
          LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
        else
          LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, LT.second, MemVT);
      }

      if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
        // This is a vector load/store for some illegal type that is scalarized.
        // We must account for the cost of building or decomposing the vector.
        Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                         Opcode == Instruction::Store);
      }
    }

    return Cost;
  }
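
  /// For example (a sketch; the outcome depends on how FSQRT legalizes):
  /// \code
  ///   // llvm.sqrt.v4f32 maps to ISD::FSQRT. If FSQRT is legal on the
  ///   // legalized type the cost is ~LT.first; if it must be expanded, the
  ///   // call is scalarized into four expensive scalar calls plus the
  ///   // insert/extract overhead.
  ///   Type *Tys[] = {V4F32Ty};
  ///   unsigned C = TTI.getIntrinsicInstrCost(Intrinsic::sqrt, V4F32Ty, Tys);
  /// \endcode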
  unsigned getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                 ArrayRef<Type *> Tys) {
    unsigned ISD = 0;
    switch (IID) {
    default: {
      // Assume that we need to scalarize this intrinsic.
      unsigned ScalarizationCost = 0;
      unsigned ScalarCalls = 1;
      Type *ScalarRetTy = RetTy;
      if (RetTy->isVectorTy()) {
        ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
        ScalarRetTy = RetTy->getScalarType();
      }
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Ty, false, true);
          ScalarCalls = std::max(ScalarCalls, Ty->getVectorNumElements());
          Ty = Ty->getScalarType();
        }
        ScalarTys.push_back(Ty);
      }
      if (ScalarCalls == 1)
        return 1; // Return cost of a scalar intrinsic. Assume it to be cheap.

      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, ScalarRetTy, ScalarTys);

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }
    // Look for intrinsics that can be lowered directly or turned into a scalar
    // intrinsic instruction.
    case Intrinsic::sqrt:
      ISD = ISD::FSQRT;
      break;
    case Intrinsic::sin:
      ISD = ISD::FSIN;
      break;
    case Intrinsic::cos:
      ISD = ISD::FCOS;
      break;
    case Intrinsic::exp:
      ISD = ISD::FEXP;
      break;
    case Intrinsic::exp2:
      ISD = ISD::FEXP2;
      break;
    case Intrinsic::log:
      ISD = ISD::FLOG;
      break;
    case Intrinsic::log10:
      ISD = ISD::FLOG10;
      break;
    case Intrinsic::log2:
      ISD = ISD::FLOG2;
      break;
    case Intrinsic::fabs:
      ISD = ISD::FABS;
      break;
    case Intrinsic::minnum:
      ISD = ISD::FMINNUM;
      break;
    case Intrinsic::maxnum:
      ISD = ISD::FMAXNUM;
      break;
    case Intrinsic::copysign:
      ISD = ISD::FCOPYSIGN;
      break;
    case Intrinsic::floor:
      ISD = ISD::FFLOOR;
      break;
    case Intrinsic::ceil:
      ISD = ISD::FCEIL;
      break;
    case Intrinsic::trunc:
      ISD = ISD::FTRUNC;
      break;
    case Intrinsic::nearbyint:
      ISD = ISD::FNEARBYINT;
      break;
    case Intrinsic::rint:
      ISD = ISD::FRINT;
      break;
    case Intrinsic::round:
      ISD = ISD::FROUND;
      break;
    case Intrinsic::pow:
      ISD = ISD::FPOW;
      break;
    case Intrinsic::fma:
      ISD = ISD::FMA;
      break;
    case Intrinsic::fmuladd:
      ISD = ISD::FMA;
      break;
    // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      return 0;
    case Intrinsic::masked_store:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Store, Tys[0], 0, 0);
    case Intrinsic::masked_load:
      return static_cast<T *>(this)
          ->getMaskedMemoryOpCost(Instruction::Load, RetTy, 0, 0);
    }

    const TargetLoweringBase *TLI = getTLI();
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

    if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
      // The operation is legal. Assume it costs 1.
      // If the type is split to multiple registers, assume that there is some
      // overhead to this.
      // TODO: Once we have extract/insert subvector cost we need to use them.
      if (LT.first > 1)
        return LT.first * 2;
      return LT.first * 1;
    }

    if (!TLI->isOperationExpand(ISD, LT.second)) {
      // If the operation is custom lowered then assume
      // that the code is twice as expensive.
      return LT.first * 2;
    }

    // If we can't lower fmuladd into an FMA estimate the cost as a floating
    // point mul followed by an add.
    if (IID == Intrinsic::fmuladd)
      return static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
             static_cast<T *>(this)
                 ->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

    // Else, assume that we need to scalarize this intrinsic. For math builtins
    // this will emit a costly libcall, adding call overhead and spills. Make it
    // very expensive.
    if (RetTy->isVectorTy()) {
      unsigned ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      unsigned ScalarCalls = RetTy->getVectorNumElements();
      SmallVector<Type *, 4> ScalarTys;
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        Type *Ty = Tys[i];
        if (Ty->isVectorTy())
          Ty = Ty->getScalarType();
        ScalarTys.push_back(Ty);
      }
      unsigned ScalarCost = static_cast<T *>(this)->getIntrinsicInstrCost(
          IID, RetTy->getScalarType(), ScalarTys);
      for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
        if (Tys[i]->isVectorTy()) {
          ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
          ScalarCalls = std::max(ScalarCalls, Tys[i]->getVectorNumElements());
        }
      }

      return ScalarCalls * ScalarCost + ScalarizationCost;
    }

    // This is going to be turned into a library call, make it expensive.
    return 10;
  }

  /// \brief Compute a cost of the given call instruction.
  ///
  /// Compute the cost of calling function F with return type RetTy and
  /// argument types Tys. F might be nullptr, in which case the cost of an
  /// arbitrary call with the specified signature will be returned.
  /// This is used, for instance, when we estimate a call of a vector
  /// counterpart of the given function.
  /// \param F Called function, might be nullptr.
  /// \param RetTy Return value types.
  /// \param Tys Argument types.
  /// \returns The cost of Call instruction.
  unsigned getCallInstrCost(Function *F, Type *RetTy, ArrayRef<Type *> Tys) {
    return 10;
  }

  unsigned getNumberOfParts(Type *Tp) {
    std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
    return LT.first;
  }

  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) { return 0; }
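
  /// For example (a sketch for an 8-lane add reduction):
  /// \code
  ///   // Reducing <8 x i32> takes log2(8) = 3 levels, each modeled as one
  ///   // subvector-extract shuffle (doubled when IsPairwise) plus one add,
  ///   // plus the overhead of extracting the result out of the vector:
  ///   //   Cost = 3 * (IsPairwise + 1) * Shuffle + 3 * Add + ExtractOverhead
  ///   unsigned C = TTI.getReductionCost(Instruction::Add, V8I32Ty,
  ///                                     /*IsPairwise=*/false);
  /// \endcode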
  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwise) {
    assert(Ty->isVectorTy() && "Expect a vector type");
    unsigned NumVecElts = Ty->getVectorNumElements();
    unsigned NumReduxLevels = Log2_32(NumVecElts);
    unsigned ArithCost =
        NumReduxLevels *
        static_cast<T *>(this)->getArithmeticInstrCost(Opcode, Ty);
    // Assume the pairwise shuffles add a cost.
    unsigned ShuffleCost =
        NumReduxLevels * (IsPairwise + 1) *
        static_cast<T *>(this)
            ->getShuffleCost(TTI::SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
    return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
  }

  /// @}
};

/// \brief Concrete BasicTTIImpl that can be used if no further customization
/// is needed.
class BasicTTIImpl : public BasicTTIImplBase<BasicTTIImpl> {
  typedef BasicTTIImplBase<BasicTTIImpl> BaseT;
  friend class BasicTTIImplBase<BasicTTIImpl>;

  const TargetSubtargetInfo *ST;
  const TargetLoweringBase *TLI;

  const TargetSubtargetInfo *getST() const { return ST; }
  const TargetLoweringBase *getTLI() const { return TLI; }

public:
  explicit BasicTTIImpl(const TargetMachine *TM, Function &F);

  // Provide value semantics. MSVC requires that we spell all of these out.
  BasicTTIImpl(const BasicTTIImpl &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
  BasicTTIImpl(BasicTTIImpl &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
        TLI(std::move(Arg.TLI)) {}
  BasicTTIImpl &operator=(const BasicTTIImpl &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    ST = RHS.ST;
    TLI = RHS.TLI;
    return *this;
  }
  BasicTTIImpl &operator=(BasicTTIImpl &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    ST = std::move(RHS.ST);
    TLI = std::move(RHS.TLI);
    return *this;
  }
};

} // end namespace llvm

#endif