//===- BasicTargetTransformInfo.cpp - Basic target-independent TTI impl ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// This file provides the implementation of a basic TargetTransformInfo pass
/// predicated on the target abstractions present in the target independent
/// code generator. It uses these (primarily TargetLowering) to model as much
/// of the TTI query interface as possible. It is included by most targets so
/// that they can specialize only a small subset of the query space.
//
//===----------------------------------------------------------------------===//
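
// For illustration: a target typically registers this pass from its
// TargetMachine and then adds its own TTI pass, which overrides only the
// queries it cares about (the target names here are hypothetical):
//
//   void MyTargetMachine::addAnalysisPasses(PassManagerBase &PM) {
//     PM.add(createBasicTargetTransformInfoPass(this));
//     PM.add(createMyTargetTransformInfoPass(this)); // hypothetical target TTI
//   }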

#include "llvm/CodeGen/Passes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <utility>

using namespace llvm;

static cl::opt<unsigned>
PartialUnrollingThreshold("partial-unrolling-threshold", cl::init(0),
  cl::desc("Threshold for partial unrolling"), cl::Hidden);
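// The threshold can be forced from the command line through any tool that
// accepts LLVM's cl::opt flags, e.g. (illustrative invocation):
//   opt -loop-unroll -partial-unrolling-threshold=20 ...
// Otherwise the subtarget's LoopMicroOpBufferSize is consulted below.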

#define DEBUG_TYPE "basictti"

namespace {

class BasicTTI final : public ImmutablePass, public TargetTransformInfo {
  const TargetMachine *TM;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted into and/or extracted from
  /// vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

  /// Estimate the cost overhead of an SK_Alternate shuffle.
  unsigned getAltShuffleOverhead(Type *Ty) const;

  const TargetLoweringBase *getTLI() const {
    return TM->getSubtargetImpl()->getTargetLowering();
  }

public:

  BasicTTI() : ImmutablePass(ID), TM(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  BasicTTI(const TargetMachine *TM) : ImmutablePass(ID), TM(TM) {
    initializeBasicTTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }
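  // pushTTIStack registers this implementation on the TargetTransformInfo
  // analysis-group stack; calls made through TopTTI below dispatch to the
  // most-derived TTI in that stack, so a target's overrides take precedence
  // over these basic implementations.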

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo *)this;
    return this;
  }

  bool hasBranchDivergence() const override;

  /// \name Scalar TTI Implementations
  /// @{

  bool isLegalAddImmediate(int64_t imm) const override;
  bool isLegalICmpImmediate(int64_t imm) const override;
  bool isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                             int64_t BaseOffset, bool HasBaseReg,
                             int64_t Scale) const override;
  int getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                           int64_t BaseOffset, bool HasBaseReg,
                           int64_t Scale) const override;
  bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
  bool isTypeLegal(Type *Ty) const override;
  unsigned getJumpBufAlignment() const override;
  unsigned getJumpBufSize() const override;
  bool shouldBuildLookupTables() const override;
  bool haveFastSqrt(Type *Ty) const override;
  void getUnrollingPreferences(Loop *L,
                               UnrollingPreferences &UP) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCFInstrCost(unsigned Opcode) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;
  unsigned getIntrinsicInstrCost(Intrinsic::ID, Type *RetTy,
                                 ArrayRef<Type*> Tys) const override;
  unsigned getNumberOfParts(Type *Tp) const override;
  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;
  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwise) const override;

  /// @}
};

}

INITIALIZE_AG_PASS(BasicTTI, TargetTransformInfo, "basictti",
                   "Target independent code generator's TTI", true, true,
                   false)

char BasicTTI::ID = 0;

ImmutablePass *
llvm::createBasicTargetTransformInfoPass(const TargetMachine *TM) {
  return new BasicTTI(TM);
}

bool BasicTTI::hasBranchDivergence() const { return false; }

bool BasicTTI::isLegalAddImmediate(int64_t imm) const {
  return getTLI()->isLegalAddImmediate(imm);
}

bool BasicTTI::isLegalICmpImmediate(int64_t imm) const {
  return getTLI()->isLegalICmpImmediate(imm);
}

bool BasicTTI::isLegalAddressingMode(Type *Ty, GlobalValue *BaseGV,
                                     int64_t BaseOffset, bool HasBaseReg,
                                     int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->isLegalAddressingMode(AM, Ty);
}

int BasicTTI::getScalingFactorCost(Type *Ty, GlobalValue *BaseGV,
                                   int64_t BaseOffset, bool HasBaseReg,
                                   int64_t Scale) const {
  TargetLoweringBase::AddrMode AM;
  AM.BaseGV = BaseGV;
  AM.BaseOffs = BaseOffset;
  AM.HasBaseReg = HasBaseReg;
  AM.Scale = Scale;
  return getTLI()->getScalingFactorCost(AM, Ty);
}

bool BasicTTI::isTruncateFree(Type *Ty1, Type *Ty2) const {
  return getTLI()->isTruncateFree(Ty1, Ty2);
}

bool BasicTTI::isTypeLegal(Type *Ty) const {
  EVT T = getTLI()->getValueType(Ty);
  return getTLI()->isTypeLegal(T);
}

unsigned BasicTTI::getJumpBufAlignment() const {
  return getTLI()->getJumpBufAlignment();
}

unsigned BasicTTI::getJumpBufSize() const {
  return getTLI()->getJumpBufSize();
}

bool BasicTTI::shouldBuildLookupTables() const {
  const TargetLoweringBase *TLI = getTLI();
  return TLI->supportJumpTables() &&
      (TLI->isOperationLegalOrCustom(ISD::BR_JT, MVT::Other) ||
       TLI->isOperationLegalOrCustom(ISD::BRIND, MVT::Other));
}

bool BasicTTI::haveFastSqrt(Type *Ty) const {
  const TargetLoweringBase *TLI = getTLI();
  EVT VT = TLI->getValueType(Ty);
  return TLI->isTypeLegal(VT) && TLI->isOperationLegalOrCustom(ISD::FSQRT, VT);
}

void BasicTTI::getUnrollingPreferences(Loop *L,
                                       UnrollingPreferences &UP) const {
  // This unrolling functionality is target independent, but to provide some
  // motivation for its intended use, for x86:

  // According to the Intel 64 and IA-32 Architectures Optimization Reference
  // Manual, Intel Core models and later have a loop stream detector (and
  // associated uop queue) that can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have no more than 4 (8 for Nehalem and later) branches
  //    taken, and none of them may be calls.
  //  - The loop can have no more than 18 (28 for Nehalem and later) uops.

  // According to the Software Optimization Guide for AMD Family 15h
  // Processors, models 30h-4fh (Steamroller and later) have a loop predictor
  // and loop buffer which can benefit from partial unrolling.
  // The relevant requirements are:
  //  - The loop must have fewer than 16 branches.
  //  - The loop must have fewer than 40 uops in all executed loop branches.

  // The number of taken branches in a loop is hard to estimate here, and
  // benchmarking has revealed that it is better not to be conservative when
  // estimating the branch count. As a result, we'll ignore the branch limits
  // until someone finds a case where it matters in practice.
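
  // Illustrative arithmetic (buffer sizes here are assumptions for the
  // example, not target data): with a loop micro-op buffer of 28 and a loop
  // body of 7 uops, a partial unroll by 4x (4 * 7 = 28 uops) still fits the
  // buffer, so MaxOps = 28 permits that unroll factor, while an 8-uop body
  // would be limited to 3x (24 uops).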

  unsigned MaxOps;
  const TargetSubtargetInfo *ST = &TM->getSubtarget<TargetSubtargetInfo>();
  if (PartialUnrollingThreshold.getNumOccurrences() > 0)
    MaxOps = PartialUnrollingThreshold;
  else if (ST->getSchedModel()->LoopMicroOpBufferSize > 0)
    MaxOps = ST->getSchedModel()->LoopMicroOpBufferSize;
  else
    return;

  // Scan the loop: don't unroll loops with calls.
  for (Loop::block_iterator I = L->block_begin(), E = L->block_end();
       I != E; ++I) {
    BasicBlock *BB = *I;

    for (BasicBlock::iterator J = BB->begin(), JE = BB->end(); J != JE; ++J)
      if (isa<CallInst>(J) || isa<InvokeInst>(J)) {
        ImmutableCallSite CS(J);
        if (const Function *F = CS.getCalledFunction()) {
          if (!TopTTI->isLoweredToCall(F))
            continue;
        }

        return;
      }
  }

  // Enable runtime and partial unrolling up to the specified size.
  UP.Partial = UP.Runtime = true;
  UP.PartialThreshold = UP.PartialOptSizeThreshold = MaxOps;
}

//===----------------------------------------------------------------------===//
//
// Calls used by the vectorizers.
//
//===----------------------------------------------------------------------===//

unsigned BasicTTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                            bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
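
// For example (assuming a unit cost per element access from
// getVectorInstrCost): scalarizing a <4 x i32> with both Insert and Extract
// set counts 4 insertelement plus 4 extractelement operations, for a total
// overhead of 8.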

unsigned BasicTTI::getNumberOfRegisters(bool Vector) const {
  return 1;
}

unsigned BasicTTI::getRegisterBitWidth(bool Vector) const {
  return 32;
}

unsigned BasicTTI::getMaximumUnrollFactor() const {
  return 1;
}

unsigned BasicTTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                          OperandValueKind,
                                          OperandValueKind) const {
  // Check if any of the operands are vector operands.
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  bool IsFloat = Ty->getScalarType()->isFloatingPointTy();
  // Assume that floating point arithmetic operations cost twice as much as
  // integer operations.
  unsigned OpCost = (IsFloat ? 2 : 1);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2 * OpCost;
    return LT.first * 1 * OpCost;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume that the code is twice
    // as expensive.
    return LT.first * 2 * OpCost;
  }

  // Else, assume that we need to scalarize this op.
  if (Ty->isVectorTy()) {
    unsigned Num = Ty->getVectorNumElements();
    unsigned Cost = TopTTI->getArithmeticInstrCost(Opcode, Ty->getScalarType());
    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Ty, true, true) + Num * Cost;
  }

  // We don't know anything about this scalar instruction.
  return OpCost;
}
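
// Worked example (the vector widths are assumptions for illustration): an
// integer add on <8 x i32> on a target whose widest legal vector is
// <4 x i32> legalizes by splitting, so LT.first == 2 and the legal case
// above returns 2 * 2 * 1 = 4; the floating point equivalent doubles again
// through OpCost.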

unsigned BasicTTI::getAltShuffleOverhead(Type *Ty) const {
  assert(Ty->isVectorTy() && "Can only shuffle vectors");
  unsigned Cost = 0;
  // The shuffle cost is equal to the cost of extracting the elements from the
  // arguments plus the cost of inserting them into the result vector.
  // e.g. a <4 x float> shuffle with a mask of <0,5,2,7> extracts index 0 of
  // the first vector, index 1 of the second vector, index 2 of the first
  // vector, and finally index 3 of the second vector, and inserts them at
  // indices <0,1,2,3> of the result vector.
  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }
  return Cost;
}

unsigned BasicTTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                  Type *SubTp) const {
  if (Kind == SK_Alternate) {
    return getAltShuffleOverhead(Tp);
  }
  return 1;
}

unsigned BasicTTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                    Type *Src) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> SrcLT = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> DstLT = TLI->getTypeLegalizationCost(Dst);

  // Check for NOOP conversions.
  if (SrcLT.first == DstLT.first &&
      SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

    // Bitcasts between types that are legalized to the same type are free.
    if (Opcode == Instruction::BitCast || Opcode == Instruction::Trunc)
      return 0;
  }

  if (Opcode == Instruction::Trunc &&
      TLI->isTruncateFree(SrcLT.second, DstLT.second))
    return 0;

  if (Opcode == Instruction::ZExt &&
      TLI->isZExtFree(SrcLT.second, DstLT.second))
    return 0;

  // If the cast is marked as legal (or promote) then assume low cost.
  if (SrcLT.first == DstLT.first &&
      TLI->isOperationLegalOrPromote(ISD, DstLT.second))
    return 1;

  // Handle scalar conversions.
  if (!Src->isVectorTy() && !Dst->isVectorTy()) {

    // Scalar bitcasts are usually free.
    if (Opcode == Instruction::BitCast)
      return 0;

    // Just check the op cost. If the operation is legal then assume it
    // costs 1.
    if (!TLI->isOperationExpand(ISD, DstLT.second))
      return 1;

    // Assume that illegal scalar instructions are expensive.
    return 4;
  }

  // Check vector-to-vector casts.
  if (Dst->isVectorTy() && Src->isVectorTy()) {

    // If the cast is between same-sized registers, then the check is simple.
    if (SrcLT.first == DstLT.first &&
        SrcLT.second.getSizeInBits() == DstLT.second.getSizeInBits()) {

      // Assume that Zext is done using AND.
      if (Opcode == Instruction::ZExt)
        return 1;

      // Assume that sext is done using SHL and SRA.
      if (Opcode == Instruction::SExt)
        return 2;

      // Just check the op cost. If the operation is legal then assume it
      // costs 1 and multiply by the type-legalization overhead.
      if (!TLI->isOperationExpand(ISD, DstLT.second))
        return SrcLT.first * 1;
    }

    // If we are converting vectors and the operation is illegal, or
    // if the vectors are legalized to different types, estimate the
    // scalarization costs.
    unsigned Num = Dst->getVectorNumElements();
    unsigned Cost = TopTTI->getCastInstrCost(Opcode, Dst->getScalarType(),
                                             Src->getScalarType());

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(Dst, true, true) + Num * Cost;
  }

  // We already handled vector-to-vector and scalar-to-scalar conversions. This
  // is where we handle bitcasts between vectors and scalars. We need to assume
  // that the conversion is scalarized in one way or another.
  if (Opcode == Instruction::BitCast)
    // Illegal bitcasts are done by storing and loading from a stack slot.
    return (Src->isVectorTy() ? getScalarizationOverhead(Src, false, true) : 0) +
           (Dst->isVectorTy() ? getScalarizationOverhead(Dst, true, false) : 0);

  llvm_unreachable("Unhandled cast");
}
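
// For example: when the source and destination legalize to same-sized
// registers, a zext is modeled as a single AND (cost 1) and a sext as
// SHL + SRA (cost 2), while a bitcast between a scalar and a vector type
// falls through to the store/load-from-stack-slot estimate at the end.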

unsigned BasicTTI::getCFInstrCost(unsigned Opcode) const {
  // Branches are assumed to be predicted.
  return 0;
}

unsigned BasicTTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                      Type *CondTy) const {
  const TargetLoweringBase *TLI = getTLI();
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // Selects on vectors are actually vector selects.
  if (ISD == ISD::SELECT) {
    assert(CondTy && "CondTy must exist");
    if (CondTy->isVectorTy())
      ISD = ISD::VSELECT;
  }

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1. Multiply
    // by the type-legalization overhead.
    return LT.first * 1;
  }

  // Otherwise, assume that the compare or select is scalarized.
  if (ValTy->isVectorTy()) {
    unsigned Num = ValTy->getVectorNumElements();
    if (CondTy)
      CondTy = CondTy->getScalarType();
    unsigned Cost = TopTTI->getCmpSelInstrCost(Opcode, ValTy->getScalarType(),
                                               CondTy);

    // Return the cost of multiple scalar invocations plus the cost of
    // inserting and extracting the values.
    return getScalarizationOverhead(ValTy, true, false) + Num * Cost;
  }

  // Unknown scalar opcode.
  return 1;
}
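
// Illustrative example (per-element costs are assumptions): scalarizing a
// select on <4 x i32> whose ISD::VSELECT is marked Expand costs 4 scalar
// selects plus the overhead of inserting the 4 results back into a vector.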

unsigned BasicTTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                      unsigned Index) const {
  std::pair<unsigned, MVT> LT =
      getTLI()->getTypeLegalizationCost(Val->getScalarType());

  return LT.first;
}

unsigned BasicTTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                   unsigned Alignment,
                                   unsigned AddressSpace) const {
  assert(!Src->isVoidTy() && "Invalid type");
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Src);

  // Assume that all loads of legal types cost 1.
  unsigned Cost = LT.first;

  if (Src->isVectorTy() &&
      Src->getPrimitiveSizeInBits() < LT.second.getSizeInBits()) {
    // This is a vector load that legalizes to a larger type than the vector
    // itself. Unless the corresponding extending load or truncating store is
    // legal, this will scalarize.
    TargetLowering::LegalizeAction LA = TargetLowering::Expand;
    EVT MemVT = getTLI()->getValueType(Src, true);
    if (MemVT.isSimple() && MemVT != MVT::Other) {
      if (Opcode == Instruction::Store)
        LA = getTLI()->getTruncStoreAction(LT.second, MemVT.getSimpleVT());
      else
        LA = getTLI()->getLoadExtAction(ISD::EXTLOAD, MemVT.getSimpleVT());
    }

    if (LA != TargetLowering::Legal && LA != TargetLowering::Custom) {
      // This is a vector load/store for some illegal type that is scalarized.
      // We must account for the cost of building or decomposing the vector.
      Cost += getScalarizationOverhead(Src, Opcode != Instruction::Store,
                                       Opcode == Instruction::Store);
    }
  }

  return Cost;
}
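
// For example (types chosen for illustration): a load of <4 x i8> on a
// target that promotes it to a wider legal vector, with no legal extending
// load, pays LT.first for the memory operation plus the insertelement
// overhead of rebuilding the <4 x i8> value; the matching store instead pays
// the extractelement overhead of decomposing it.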

unsigned BasicTTI::getIntrinsicInstrCost(Intrinsic::ID IID, Type *RetTy,
                                         ArrayRef<Type *> Tys) const {
  unsigned ISD = 0;
  switch (IID) {
  default: {
    // Assume that we need to scalarize this intrinsic.
    unsigned ScalarizationCost = 0;
    unsigned ScalarCalls = 1;
    if (RetTy->isVectorTy()) {
      ScalarizationCost = getScalarizationOverhead(RetTy, true, false);
      ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
    }
    for (unsigned i = 0, ie = Tys.size(); i != ie; ++i) {
      if (Tys[i]->isVectorTy()) {
        ScalarizationCost += getScalarizationOverhead(Tys[i], false, true);
        ScalarCalls = std::max(ScalarCalls, RetTy->getVectorNumElements());
      }
    }

    return ScalarCalls + ScalarizationCost;
  }
  // Look for intrinsics that can be lowered directly or turned into a scalar
  // intrinsic call.
  case Intrinsic::sqrt:      ISD = ISD::FSQRT;      break;
  case Intrinsic::sin:       ISD = ISD::FSIN;       break;
  case Intrinsic::cos:       ISD = ISD::FCOS;       break;
  case Intrinsic::exp:       ISD = ISD::FEXP;       break;
  case Intrinsic::exp2:      ISD = ISD::FEXP2;      break;
  case Intrinsic::log:       ISD = ISD::FLOG;       break;
  case Intrinsic::log10:     ISD = ISD::FLOG10;     break;
  case Intrinsic::log2:      ISD = ISD::FLOG2;      break;
  case Intrinsic::fabs:      ISD = ISD::FABS;       break;
  case Intrinsic::copysign:  ISD = ISD::FCOPYSIGN;  break;
  case Intrinsic::floor:     ISD = ISD::FFLOOR;     break;
  case Intrinsic::ceil:      ISD = ISD::FCEIL;      break;
  case Intrinsic::trunc:     ISD = ISD::FTRUNC;     break;
  case Intrinsic::nearbyint: ISD = ISD::FNEARBYINT; break;
  case Intrinsic::rint:      ISD = ISD::FRINT;      break;
  case Intrinsic::round:     ISD = ISD::FROUND;     break;
  case Intrinsic::pow:       ISD = ISD::FPOW;       break;
  case Intrinsic::fma:       ISD = ISD::FMA;        break;
  case Intrinsic::fmuladd:   ISD = ISD::FMA;        break;
  // FIXME: We should return 0 whenever getIntrinsicCost == TCC_Free.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
    return 0;
  }

  const TargetLoweringBase *TLI = getTLI();
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(RetTy);

  if (TLI->isOperationLegalOrPromote(ISD, LT.second)) {
    // The operation is legal. Assume it costs 1.
    // If the type is split to multiple registers, assume that there is some
    // overhead to this.
    // TODO: Once we have extract/insert subvector cost we need to use them.
    if (LT.first > 1)
      return LT.first * 2;
    return LT.first * 1;
  }

  if (!TLI->isOperationExpand(ISD, LT.second)) {
    // If the operation is custom lowered then assume that the code is twice
    // as expensive.
    return LT.first * 2;
  }

  // If we can't lower fmuladd into an FMA estimate the cost as a floating
  // point mul followed by an add.
  if (IID == Intrinsic::fmuladd)
    return TopTTI->getArithmeticInstrCost(BinaryOperator::FMul, RetTy) +
           TopTTI->getArithmeticInstrCost(BinaryOperator::FAdd, RetTy);

  // Else, assume that we need to scalarize this intrinsic. For math builtins
  // this will emit a costly libcall, adding call overhead and spills. Make it
  // very expensive.
  if (RetTy->isVectorTy()) {
    unsigned Num = RetTy->getVectorNumElements();
    unsigned Cost = TopTTI->getIntrinsicInstrCost(IID, RetTy->getScalarType(),
                                                  Tys);
    return 10 * Cost * Num;
  }

  // This is going to be turned into a library call, make it expensive.
  return 10;
}
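
// Worked example (costs follow the constants above, the type is
// illustrative): a call to llvm.sin on <4 x float> with no legal or custom
// FSIN lowering recurses to the scalar cost (10, the libcall estimate) and
// returns 10 * 10 * 4 = 400, deliberately steering the vectorizers away from
// vectorizing such calls.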

unsigned BasicTTI::getNumberOfParts(Type *Tp) const {
  std::pair<unsigned, MVT> LT = getTLI()->getTypeLegalizationCost(Tp);
  return LT.first;
}

unsigned BasicTTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  return 0;
}

unsigned BasicTTI::getReductionCost(unsigned Opcode, Type *Ty,
                                    bool IsPairwise) const {
  assert(Ty->isVectorTy() && "Expect a vector type");
  unsigned NumVecElts = Ty->getVectorNumElements();
  unsigned NumReduxLevels = Log2_32(NumVecElts);
  unsigned ArithCost = NumReduxLevels *
    TopTTI->getArithmeticInstrCost(Opcode, Ty);
  // Assume the pairwise shuffles add a cost.
  unsigned ShuffleCost =
      NumReduxLevels * (IsPairwise + 1) *
      TopTTI->getShuffleCost(SK_ExtractSubvector, Ty, NumVecElts / 2, Ty);
  return ShuffleCost + ArithCost + getScalarizationOverhead(Ty, false, true);
}
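
// Worked example (unit arithmetic and shuffle costs are assumptions): an add
// reduction of <8 x i32> uses Log2_32(8) = 3 levels, i.e. 3 adds plus 3
// shuffles for the splitting strategy, or 6 shuffles when IsPairwise is set;
// getScalarizationOverhead then adds the extract cost for reading the result
// out of the vector.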