//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}

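// Worked example (illustrative): Val = 0x1234567812345678 is not a logical
// immediate and has LZ = 3, so the formula yields (64 - 3 + 15) / 16 = 4
// moves, matching the sequence
//   movz x0, #0x5678
//   movk x0, #0x1234, lsl #16
//   movk x0, #0x5678, lsl #32
//   movk x0, #0x1234, lsl #48
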
/// \brief Calculate the cost of materializing the given constant.
unsigned AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}

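// Worked example (illustrative): an i128 constant such as
// (0x1234 << 64) | 0x5678 splits into two 64-bit chunks, each a single MOVZ
// (cost 1), for a total cost of 2; an all-zero constant still reports the
// minimum cost of 1.
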
unsigned AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                       const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<unsigned>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

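// Illustrative hoisting decision: in icmp eq i64 %x, 0x1234567812345678 the
// immediate needs 4 moves, exceeding NumConstants * TCC_Basic = 1, so it is
// reported at full cost and becomes a hoisting candidate; a cheap immediate
// such as 42 (one MOVZ) is reported as TCC_Free and left in place.
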
unsigned AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                       const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      unsigned NumConstants = (BitSize + 63) / 64;
      unsigned Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<unsigned>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}

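// Illustrative: for @llvm.sadd.with.overflow.i64(i64 %x, i64 C), operand
// index 1 is the immediate slot, so a cheap C stays folded (TCC_Free) while
// an expensive C is reported at full cost; stackmap and patchpoint arguments
// that fit in 64 bits are always free because they are recorded rather than
// materialized.
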
TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128bit popcount.
  return TTI::PSK_Software;
}

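// Illustrative scalar lowering behind PSK_FastHardware (roughly what
// LowerCTPOP emits for an i64):
//   fmov d0, x0         // copy the GPR into a SIMD register
//   cnt  v0.8b, v0.8b   // per-byte population counts
//   addv b0, v0.8b      // sum the eight byte counts
//   umov w0, v0.b[0]    // copy the result back to an integer register
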
unsigned AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst,
                                          Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT> ConversionTbl[] = {
    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  int Idx = ConvertCostTableLookup<MVT>(
      ConversionTbl, array_lengthof(ConversionTbl), ISD, DstTy.getSimpleVT(),
      SrcTy.getSimpleVT());
  if (Idx != -1)
    return ConversionTbl[Idx].Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

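// Illustrative table hit: sitofp <4 x i16> to <4 x float> maps to the
// (SINT_TO_FP, v4f32, v4i16) entry and costs 2, matching a widening sshll
// followed by a scvtf.
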
unsigned AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                            unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }

  // All other insert/extracts cost this much.
  return 2;
}

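// Illustrative costs: extractelement <4 x i32> %v, i32 0 is free because
// lane 0 of a NEON register overlaps the scalar view of that register,
// while any other lane is charged the flat cost above, reflecting the
// cross-register-file move (e.g. umov w0, v0.s[2]).
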
unsigned AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    unsigned Cost =
        getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                               TargetTransformInfo::OP_None,
                               TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return 1 * LT.first;
  }
}

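// Illustrative expansion for sdiv i32 %x, 4 (roughly what ISel emits):
//   add  w8, w0, #3        ; x + (4 - 1), used only when x is negative
//   cmp  w0, #0
//   csel w8, w8, w0, lt
//   asr  w0, w8, #2
// which is the ADD + CMP + SELECT + SRA sequence priced above.
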
unsigned AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code, where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}

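// Illustrative reading: a gather-like vector access whose lanes need
// individually computed addresses reports 10, so the vectorizers only pick
// it when there is roughly that much other work to hide the overhead;
// everything else reports a single instruction.
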
unsigned AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                            Type *CondTy) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects well when they are wider than the register
  // width.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const unsigned AmortizationCost = 20;
    static const TypeConversionCostTblEntry<MVT::SimpleValueType>
    VectorSelectTbl[] = {
      { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i32,   8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 * AmortizationCost },
      { ISD::SELECT, MVT::v4i1,  MVT::v4i64,   4 * AmortizationCost },
      { ISD::SELECT, MVT::v8i1,  MVT::v8i64,   8 * AmortizationCost },
      { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
    };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      int Idx =
          ConvertCostTableLookup(VectorSelectTbl, ISD, SelCondTy.getSimpleVT(),
                                 SelValTy.getSimpleVT());
      if (Idx != -1)
        return VectorSelectTbl[Idx].Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

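// Illustrative reading of the table: a select with a v16i1 condition over
// <16 x i32> spans four 128-bit registers and is effectively scalarized, so
// it is priced at 16 * 20 = 320 to steer the vectorizers away from it.
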
unsigned AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                         unsigned Alignment,
                                         unsigned AddressSpace) {
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that has shown in practice
    // on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    unsigned AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}

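// Illustrative arithmetic: an unaligned store of <2 x i64> has LT.first = 1
// and is priced at 1 * 2 * 6 = 12, while a store of <4 x i8> is priced at
// (4 * 2) * 4 * 2 = 64 because it is scalarized at roughly two instructions
// per element.
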
unsigned AArch64TTIImpl::getInterleavedMemoryOpCost(
    unsigned Opcode, Type *VecTy, unsigned Factor, ArrayRef<unsigned> Indices,
    unsigned Alignment, unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeAllocSizeInBits(SubVecTy);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}

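// Illustrative hit: an ld2 de-interleaving load of <8 x i16> (Factor = 2)
// has the sub-vector <4 x i16>, i.e. 64 bits, so the cost is Factor = 2; a
// <6 x i16> access with Factor = 2 has a 48-bit sub-vector and falls through
// to the base implementation.
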
unsigned AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
  unsigned Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}

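// Illustrative: a 128-bit value such as <4 x i32> that is live across a call
// is priced as one spill plus one refill, since the AArch64 calling
// convention preserves only the low 64 bits of the callee-saved SIMD
// registers v8-v15.
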
unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  if (ST->isCortexA57())
    return 4;
  return 2;
}

void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, UP);

  // An inner loop is more likely to be hot, and its runtime checks can be
  // hoisted out by LICM, so its overhead is lower; use a larger threshold
  // to unroll more inner loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}

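// Illustrative effect: if the base PartialThreshold is P, an inner loop at
// depth >= 2 is unrolled against a budget of 2 * P, while under -Os the
// zeroed PartialOptSizeThreshold switches partial unrolling off entirely.
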
Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Create a struct type that matches the stored values.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}

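// Illustrative use (by EarlyCSE): for an st3 storing three <4 x i32> values,
// ExpectedType is the struct { <4 x i32>, <4 x i32>, <4 x i32> }, and the
// result is rebuilt with three insertvalues from the stored operands, so a
// later matching ld3 from the same pointer can reuse the stored values.
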
bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.Vol = false;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.Vol = false;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }

  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}

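// Illustrative pairing: aarch64_neon_st2 and aarch64_neon_ld2 share the
// MatchingId VECTOR_LDST_TWO_ELEMENTS, which is how EarlyCSE matches an ld2
// against a preceding st2 (or ld2) on the same pointer.
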