//===-- AArch64TargetTransformInfo.cpp - AArch64 specific TTI -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
#include "AArch64TargetTransformInfo.h"
#include "MCTargetDesc/AArch64AddressingModes.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "aarch64tti"
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
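/// For example (an illustrative reading of the formula below, not a measured
/// figure): a value such as 0x0000000100000001 has 31 leading zeros, so the
/// cost is (64 - 31 + 15) / 16 = 3, i.e. three 16-bit MOVZ/MOVK-style moves.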
int AArch64TTIImpl::getIntImmCost(int64_t Val) {
  // Check if the immediate can be encoded within an instruction.
  if (Val == 0 || AArch64_AM::isLogicalImmediate(Val, 64))
    return 0;

  if (Val < 0)
    Val = ~Val;

  // Calculate how many moves we will need to materialize this constant.
  unsigned LZ = countLeadingZeros((uint64_t)Val);
  return (64 - LZ + 15) / 16;
}
/// \brief Calculate the cost of materializing the given constant.
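/// For instance (an illustrative reading of the loop below), a 128-bit
/// constant is split into two 64-bit chunks, and each chunk is priced
/// separately by the 64-bit helper above.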
int AArch64TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
int AArch64TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }
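
  // Hoisting heuristic: an immediate that sits in an operand slot which can
  // encode it is only worth hoisting when materializing it costs more than
  // one basic instruction per 64-bit chunk.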
  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
int AArch64TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                  const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if (Idx == 1) {
      int NumConstants = (BitSize + 63) / 64;
      int Cost = AArch64TTIImpl::getIntImmCost(Imm, Ty);
      return (Cost <= NumConstants * TTI::TCC_Basic)
                 ? static_cast<int>(TTI::TCC_Free)
                 : Cost;
    }
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return AArch64TTIImpl::getIntImmCost(Imm, Ty);
}
TargetTransformInfo::PopcntSupportKind
AArch64TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  if (TyWidth == 32 || TyWidth == 64)
    return TTI::PSK_FastHardware;
  // TODO: AArch64TargetLowering::LowerCTPOP() supports 128-bit popcount.
  return TTI::PSK_Software;
}
int AArch64TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);
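
  // An illustrative reading of the "Complex" entries below: v4i16 -> v4f32
  // costs 2 because the input is first widened to v4i32 and then converted
  // (e.g. one sshll followed by one scvtf).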
  static const TypeConversionCostTblEntry<MVT> ConversionTbl[] = {
    // LowerVectorINT_TO_FP:
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i32, 1 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 1 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 1 },

    // Complex: to v2f32
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::SINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i8,  3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i16, 3 },
    { ISD::UINT_TO_FP, MVT::v2f32, MVT::v2i64, 2 },

    // Complex: to v4f32
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i8,  4 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i8,  3 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i16, 2 },

    // Complex: to v2f64
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i8,  4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i16, 4 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i32, 2 },

    // LowerVectorFP_TO_INT
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f64, 1 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v4i32, MVT::v4f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f64, 1 },

    // Complex, from v2f32: legal type is v2i32 (no cost) or v2i64 (1 ext).
    { ISD::FP_TO_SINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i64, MVT::v2f32, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f32, 1 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f32, 1 },

    // Complex, from v4f32: legal type is v4i16, 1 narrowing => ~2
    { ISD::FP_TO_SINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_SINT, MVT::v4i8,  MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i16, MVT::v4f32, 2 },
    { ISD::FP_TO_UINT, MVT::v4i8,  MVT::v4f32, 2 },

    // Complex, from v2f64: legal type is v2i32, 1 narrowing => ~2.
    { ISD::FP_TO_SINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_SINT, MVT::v2i8,  MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i32, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i16, MVT::v2f64, 2 },
    { ISD::FP_TO_UINT, MVT::v2i8,  MVT::v2f64, 2 },
  };

  int Idx = ConvertCostTableLookup<MVT>(
      ConversionTbl, array_lengthof(ConversionTbl), ISD, DstTy.getSimpleVT(),
      SrcTy.getSimpleVT());
  if (Idx != -1)
    return ConversionTbl[Idx].Cost;

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
int AArch64TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                       unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // The element at index zero is already inside the vector.
    if (Index == 0)
      return 0;
  }
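
  // A hedged rationale for the constant below: a lane insert/extract is a
  // single INS/UMOV, but it usually also implies a GPR<->FPR transfer, so a
  // cost of 2 is a reasonable estimate rather than a measured figure.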
  // All other insert/extracts cost this much.
  return 2;
}
int AArch64TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Opd1Info,
    TTI::OperandValueKind Opd2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);

  if (ISD == ISD::SDIV &&
      Opd2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On AArch64, scalar signed division by a power-of-two constant is
    // normally expanded to the sequence ADD + CMP + SELECT + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
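    // Illustrative codegen for "x / 4" under this expansion (exact registers
    // and scheduling will vary):
    //   add  w8, w0, #3
    //   cmp  w0, #0
    //   csel w8, w8, w0, lt
    //   asr  w0, w8, #2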
    int Cost = getArithmeticInstrCost(Instruction::Add, Ty, Opd1Info, Opd2Info,
                                      TargetTransformInfo::OP_None,
                                      TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Sub, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Select, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::AShr, Ty, Opd1Info, Opd2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }

  switch (ISD) {
  default:
    return BaseT::getArithmeticInstrCost(Opcode, Ty, Opd1Info, Opd2Info,
                                         Opd1PropInfo, Opd2PropInfo);
  case ISD::ADD:
  case ISD::MUL:
  case ISD::XOR:
  case ISD::OR:
  case ISD::AND:
    // These nodes are marked as 'custom' for combining purposes only.
    // We know that they are legal. See LowerAdd in ISelLowering.
    return LT.first;
  }
}
int AArch64TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code, where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  // In many cases the address computation is not merged into the instruction
  // addressing mode.
  return 1;
}
int AArch64TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                       Type *CondTy) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  // We don't lower vector selects that are wider than the register width well.
  if (ValTy->isVectorTy() && ISD == ISD::SELECT) {
    // We would need this many instructions to hide the scalarization happening.
    const int AmortizationCost = 20;
    static const TypeConversionCostTblEntry<MVT::SimpleValueType>
        VectorSelectTbl[] = {
          { ISD::SELECT, MVT::v16i1, MVT::v16i16, 16 * AmortizationCost },
          { ISD::SELECT, MVT::v8i1,  MVT::v8i32,   8 * AmortizationCost },
          { ISD::SELECT, MVT::v16i1, MVT::v16i32, 16 * AmortizationCost },
          { ISD::SELECT, MVT::v4i1,  MVT::v4i64,   4 * AmortizationCost },
          { ISD::SELECT, MVT::v8i1,  MVT::v8i64,   8 * AmortizationCost },
          { ISD::SELECT, MVT::v16i1, MVT::v16i64, 16 * AmortizationCost }
        };

    EVT SelCondTy = TLI->getValueType(DL, CondTy);
    EVT SelValTy = TLI->getValueType(DL, ValTy);
    if (SelCondTy.isSimple() && SelValTy.isSimple()) {
      int Idx =
          ConvertCostTableLookup(VectorSelectTbl, ISD, SelCondTy.getSimpleVT(),
                                 SelValTy.getSimpleVT());
      if (Idx != -1)
        return VectorSelectTbl[Idx].Cost;
    }
  }
  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
int AArch64TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                    unsigned Alignment, unsigned AddressSpace) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);

  if (Opcode == Instruction::Store && Src->isVectorTy() && Alignment != 16 &&
      Src->getVectorElementType()->isIntegerTy(64)) {
    // Unaligned stores are extremely inefficient. We don't split unaligned
    // v2i64 stores because of the negative impact that has been shown in
    // practice on inlined memcpy code.
    // We make v2i64 stores expensive so that we will only vectorize if there
    // are 6 other instructions getting vectorized.
    int AmortizationCost = 6;

    return LT.first * 2 * AmortizationCost;
  }

  if (Src->isVectorTy() && Src->getVectorElementType()->isIntegerTy(8) &&
      Src->getVectorNumElements() < 8) {
    // We scalarize the loads/stores because there is no v.4b register and we
    // have to promote the elements to v.4h.
    unsigned NumVecElts = Src->getVectorNumElements();
    unsigned NumVectorizableInstsToAmortize = NumVecElts * 2;
    // We generate 2 instructions per vector element.
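    // For instance (illustrative): a v4i8 load becomes roughly 4 scalar loads
    // plus 4 lane inserts, i.e. 8 instructions, and the returned cost is
    // inflated further so vectorization only wins with enough amortizing work.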
    return NumVectorizableInstsToAmortize * NumVecElts * 2;
  }

  return LT.first;
}
int AArch64TTIImpl::getInterleavedMemoryOpCost(unsigned Opcode, Type *VecTy,
                                               unsigned Factor,
                                               ArrayRef<unsigned> Indices,
                                               unsigned Alignment,
                                               unsigned AddressSpace) {
  assert(Factor >= 2 && "Invalid interleave factor");
  assert(isa<VectorType>(VecTy) && "Expect a vector type");

  if (Factor <= TLI->getMaxSupportedInterleaveFactor()) {
    unsigned NumElts = VecTy->getVectorNumElements();
    Type *SubVecTy = VectorType::get(VecTy->getScalarType(), NumElts / Factor);
    unsigned SubVecSize = DL.getTypeAllocSizeInBits(SubVecTy);

    // ldN/stN only support legal vector types of size 64 or 128 in bits.
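    // Illustrative example: a stride-2 load of <8 x i32> splits into two
    // v4i32 subvectors (128 bits each) and maps to a single
    // "ld2 { v0.4s, v1.4s }", so it is priced at Factor below.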
    if (NumElts % Factor == 0 && (SubVecSize == 64 || SubVecSize == 128))
      return Factor;
  }

  return BaseT::getInterleavedMemoryOpCost(Opcode, VecTy, Factor, Indices,
                                           Alignment, AddressSpace);
}
int AArch64TTIImpl::getCostOfKeepingLiveOverCall(ArrayRef<Type *> Tys) {
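  // Rationale (not stated in the original): under AAPCS64 only the low 64
  // bits of v8-v15 are callee-saved, so a 128-bit vector that lives across a
  // call generally has to be spilled and refilled; price that as one store
  // plus one load.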
  int Cost = 0;
  for (auto *I : Tys) {
    if (!I->isVectorTy())
      continue;
    if (I->getScalarSizeInBits() * I->getVectorNumElements() == 128)
      Cost += getMemoryOpCost(Instruction::Store, I, 128, 0) +
              getMemoryOpCost(Instruction::Load, I, 128, 0);
  }
  return Cost;
}
unsigned AArch64TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  if (ST->isCortexA57())
    return 4;
  return 2;
}
void AArch64TTIImpl::getUnrollingPreferences(Loop *L,
                                             TTI::UnrollingPreferences &UP) {
  // Enable partial unrolling and runtime unrolling.
  BaseT::getUnrollingPreferences(L, UP);

  // An inner loop is more likely to be hot, and its runtime check can be
  // hoisted out by LICM, so its overhead is lower; use a larger threshold
  // to unroll more inner loops.
  if (L->getLoopDepth() > 1)
    UP.PartialThreshold *= 2;

  // Disable partial & runtime unrolling on -Os.
  UP.PartialOptSizeThreshold = 0;
}
Value *AArch64TTIImpl::getOrCreateResultFromMemIntrinsic(IntrinsicInst *Inst,
                                                         Type *ExpectedType) {
  switch (Inst->getIntrinsicID()) {
  default:
    return nullptr;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4: {
    // Check that the expected struct type matches the values being stored.
    StructType *ST = dyn_cast<StructType>(ExpectedType);
    if (!ST)
      return nullptr;
    unsigned NumElts = Inst->getNumArgOperands() - 1;
    if (ST->getNumElements() != NumElts)
      return nullptr;
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      if (Inst->getArgOperand(i)->getType() != ST->getElementType(i))
        return nullptr;
    }
    // Rebuild the struct result from the intrinsic's stored operands.
    Value *Res = UndefValue::get(ExpectedType);
    IRBuilder<> Builder(Inst);
    for (unsigned i = 0, e = NumElts; i != e; ++i) {
      Value *L = Inst->getArgOperand(i);
      Res = Builder.CreateInsertValue(Res, L, i);
    }
    return Res;
  }
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    if (Inst->getType() == ExpectedType)
      return Inst;
    return nullptr;
  }
}
bool AArch64TTIImpl::getTgtMemIntrinsic(IntrinsicInst *Inst,
                                        MemIntrinsicInfo &Info) {
  switch (Inst->getIntrinsicID()) {
  default:
    break;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_ld4:
    Info.ReadMem = true;
    Info.WriteMem = false;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(0);
    break;
  case Intrinsic::aarch64_neon_st2:
  case Intrinsic::aarch64_neon_st3:
  case Intrinsic::aarch64_neon_st4:
    Info.ReadMem = false;
    Info.WriteMem = true;
    Info.IsSimple = true;
    Info.NumMemRefs = 1;
    Info.PtrVal = Inst->getArgOperand(Inst->getNumArgOperands() - 1);
    break;
  }
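
  // Record a matching id so that consumers of this hook (e.g. EarlyCSE) can
  // pair an ldN with an earlier stN of the same element count.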
  switch (Inst->getIntrinsicID()) {
  default:
    return false;
  case Intrinsic::aarch64_neon_ld2:
  case Intrinsic::aarch64_neon_st2:
    Info.MatchingId = VECTOR_LDST_TWO_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld3:
  case Intrinsic::aarch64_neon_st3:
    Info.MatchingId = VECTOR_LDST_THREE_ELEMENTS;
    break;
  case Intrinsic::aarch64_neon_ld4:
  case Intrinsic::aarch64_neon_st4:
    Info.MatchingId = VECTOR_LDST_FOUR_ELEMENTS;
    break;
  }
  return true;
}