//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI pass --------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// AArch64 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "AArch64.h"
#include "AArch64TargetMachine.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeAArch64TTIPass(PassRegistry &);
}

namespace {

class AArch64TTI final : public ImmutablePass, public TargetTransformInfo {
  const AArch64TargetMachine *TM;
  const AArch64Subtarget *ST;
  const AArch64TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  AArch64TTI() : ImmutablePass(ID), TM(nullptr), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  AArch64TTI(const AArch64TargetMachine *TM)
      : ImmutablePass(ID), TM(TM), ST(TM->getSubtargetImpl()),
        TLI(TM->getSubtargetImpl()->getTargetLowering()) {
    initializeAArch64TTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override { pushTTIStack(this); }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo *)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  unsigned getIntImmCost(int64_t Val) const;
  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;
  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override {
    if (Vector) {
      if (ST->hasNEON())
        return 32;
      return 0;
    }
    return 31;
  }

  unsigned getRegisterBitWidth(bool Vector) const override {
    if (Vector) {
      if (ST->hasNEON())
        return 128;
      return 0;
    }
    return 64;
  }

  unsigned getMaximumUnrollFactor() const override;

  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const
      override;

  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) const
      override;

  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty, OperandValueKind Opd1Info = OK_AnyValue,
      OperandValueKind Opd2Info = OK_AnyValue,
      OperandValueProperties Opd1PropInfo = OP_None,
      OperandValueProperties Opd2PropInfo = OP_None) const override;

  unsigned getAddressComputationCost(Type *Ty, bool IsComplex) const override;

  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) const
      override;

  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getCostOfKeepingLiveOverCall(ArrayRef<Type*> Tys) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(AArch64TTI, TargetTransformInfo, "aarch64tti",
                   "AArch64 Target Transform Info", true, true, false)
char AArch64TTI::ID = 0;

ImmutablePass *
llvm::createAArch64TargetTransformInfoPass(const AArch64TargetMachine *TM) {
  return new AArch64TTI(TM);
}
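
// A usage sketch: the target machine registers this pass from its analysis
// hook, adding the target-independent BasicTTI pass first so that this pass
// can delegate to it (mirrors AArch64TargetMachine::addAnalysisPasses):
//
//   PM.add(createBasicTargetTransformInfoPass(this));
//   PM.add(createAArch64TargetTransformInfoPass(this));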

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned AArch64TTI::getIntImmCost(int64_t Val) const {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}
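
// Worked example of the formula above: for Val = 0x0000123456789ABC (not a
// repeating bit pattern, so not a logical immediate), countLeadingZeros
// returns 19 and the cost is (64 - 19 + 15) / 16 = 3, i.e. one MOVZ plus two
// MOVKs, one instruction per non-zero 16-bit halfword.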

/// \brief Calculate the cost of materializing the given constant.
unsigned AArch64TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}
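
// For instance, an i128 immediate is costed as two independent 64-bit chunks,
// so a positive value whose upper 64 bits are zero only pays for its low
// chunk; the std::max guard keeps an all-zero constant from reporting a cost
// of zero instructions.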

unsigned AArch64TTI::getIntImmCost(unsigned Opcode, unsigned Idx,
                                   const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = AArch64TTI::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TCC_Basic)
               ? static_cast<unsigned>(TCC_Free) : Cost;
  }
  return AArch64TTI::getIntImmCost(Imm, Ty);
}
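
// The threshold above reports a constant as TCC_Free when rematerializing it
// at each use costs no more than one basic instruction per 64-bit chunk; e.g.
// a single-MOV i64 immediate (cost 1 <= 1 * TCC_Basic) stays free, while a
// three-MOV immediate is reported at full cost so constant hoisting can pull
// it out of loops.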

unsigned AArch64TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                   const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  switch (IID) {
  default:
    return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      unsigned NumConstants = (BitSize + 63) / 64;
      unsigned Cost = AArch64TTI::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TCC_Basic)
                 ? static_cast<unsigned>(TCC_Free) : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return AArch64TTI::getIntImmCost(Imm, Ty);
}

AArch64TTI::PopcntSupportKind
AArch64TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return PSK_Software;
}

unsigned AArch64TTI::getCastInstrCost(unsigned Opcode, Type *Dst,
                                      Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT> ConversionTbl[] = {
    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  int Idx = ConvertCostTableLookup<MVT>(
      ConversionTbl, array_lengthof(ConversionTbl), ISD, DstTy.getSimpleVT(),
      SrcTy.getSimpleVT());
  if (Idx != -1)
    return ConversionTbl[Idx].Cost;

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}
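
// E.g. a signed v4i16 -> v4f32 conversion hits the { ISD::SINT_TO_FP,
// MVT::v4f32, MVT::v4i16, 2 } entry: one vector extend plus one convert
// instruction, so the lookup returns 2.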

unsigned AArch64TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                        unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return 2;
}
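
// E.g. a v8i32 is split into two v4i32 registers during legalization, so
// extracting element 5 normalizes to lane 5 % 4 == 1 and is costed like any
// other non-zero lane (2), while element 4 normalizes to lane 0 and is free.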

unsigned AArch64TTI::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, OperandValueKind Opd1Info,
    OperandValueKind Opd2Info, OperandValueProperties Opd1PropInfo,
    OperandValueProperties Opd2PropInfo) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  switch (ISD) {
  default:
    return TargetTransformInfo::getArithmeticInstrCost(
        Opcode, Ty, Opd1Info, Opd2Info, Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}

unsigned AArch64TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

unsigned AArch64TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                        Type *CondTy) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry<MVT::SimpleValueType>
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 * AmortizationCost },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,  4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,  8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(CondTy);
    EVT SelValTy = TLI->getValueType(ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      int Idx =
          ConvertCostTableLookup(VectorSelectTbl, ISD, SelCondTy.getSimpleVT(),
                                 SelValTy.getSimpleVT());
      if (Idx != -1)
        return VectorSelectTbl[Idx].Cost;
    }
  }
  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
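
// E.g. a select of v16i32 values under a v16i1 condition matches the table
// above and is costed 16 * 20 = 320, which effectively blocks vectorization
// unless a large amount of surrounding work amortizes the scalarization.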

unsigned AArch64TTI::getMemoryOpCost(unsigned Opcode, Type *Src,
                                     unsigned Alignment,
                                     unsigned AddressSpace) const {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that has shown up in
    // practice on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    unsigned AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}
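
// E.g. an unaligned v2i64 store legalizes in one step (LT.first == 1) and is
// costed 1 * 2 * 6 = 12, while a v4i8 access is costed (4 * 2) * 4 * 2 = 64
// to reflect full scalarization.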

unsigned AArch64TTI::getCostOfKeepingLiveOverCall(ArrayRef<Type*> Tys) const {
  unsigned Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}
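
// Rationale: under AAPCS64 only the low 64 bits of v8-v15 are callee-saved,
// so a full 128-bit vector that is live across a call must be spilled before
// the call and reloaded after it; the cost is modeled as one store plus one
// load of the vector type.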

unsigned AArch64TTI::getMaximumUnrollFactor() const {
  if (ST->isCortexA57() || ST->isCyclone())
    return 4;
  return 2;
}