//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "x86tti"

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI final : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getAddressComputationCost(Type *PtrTy,
                                     bool IsComplex) const override;

  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwiseForm) const override;

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}
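
// For context: a minimal sketch (assumed call site; the real one lives in
// X86TargetMachine.cpp) of how this factory is wired into the pass pipeline:
//
//   void X86TargetMachine::addAnalysisPasses(PassManagerBase &PM) {
//     // Generic BasicTTI first, so X86TTI can delegate to it whenever it has
//     // no more precise answer.
//     PM.add(createBasicTargetTransformInfoPass(this));
//     PM.add(createX86TargetTransformInfoPass(this));
//   }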

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}
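
// Example for getPopcntSupport above: with +popcnt a scalar @llvm.ctpop.i32
// lowers to a single popcntl instruction (hence PSK_FastHardware); without it
// the expansion falls back to the multi-instruction bit-twiddling sequence
// (hence PSK_Software).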

unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit())
    return 16;
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them as
    // custom, so that we can detect the cases where the shift amount is a
    // scalar.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },

    { ISD::SHL,  MVT::v32i8,  42 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16, 16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16, 8*10 },  // Scalarized.

    { ISD::SRA,  MVT::v32i8,  32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,  4*10 },  // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,  8*20 },
    { ISD::SDIV, MVT::v4i64,  4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,  8*20 },
    { ISD::UDIV, MVT::v4i64,  4*20 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }
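
  // Illustrative sketch of the vpmullw trick above (assumed lowering): a
  // shift by a constant build_vector such as
  //   %r = shl <16 x i16> %a, <i16 1, i16 2, i16 3, ..., i16 1>
  // can be emitted as a single vpmullw against <2, 4, 8, ..., 2>, i.e. one
  // instruction per legalized vector instead of a scalarized sequence.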

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }
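
  // Example for the uniform-constant tables above: 'sdiv <8 x i16> %a,
  // <i16 7, ...>' is lowered via a pmulhw-based magic-number sequence rather
  // than eight scalar idivs, which is why SDIV on v8i16 is only cost 6 here.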

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // Vector shift left by a non-uniform constant can be lowered
      // into a vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,  30 },   // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i32,  2*5 },  // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.

    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 },  // Scalarized.

    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 },  // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }
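
  // Sketch of the hoisting problem mentioned in the SSE2 table above: given
  //   %ins   = insertelement <4 x i32> undef, i32 %amt, i32 0
  //   %splat = shufflevector <4 x i32> %ins, <4 x i32> undef,
  //                          <4 x i32> zeroinitializer
  //   loop:
  //     %s = shl <4 x i32> %v, %splat
  // LICM will typically hoist %splat out of the loop, and basic-block-local
  // ISel then no longer sees that the shift amount is a splat of a scalar, so
  // the cheap shift-by-scalar form cannot be matched; the table therefore
  // charges the conservative scalarized cost.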

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL, MVT::v16i16, 4 },
    { ISD::MUL, MVT::v8i32,  4 },
    { ISD::SUB, MVT::v8i32,  4 },
    { ISD::ADD, MVT::v8i32,  4 },
    { ISD::SUB, MVT::v4i64,  4 },
    { ISD::ADD, MVT::v4i64,  4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL, MVT::v4i64, 18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse shuffles.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  unsigned Cost = 1;
  if (LT.second.getSizeInBits() > 128)
    Cost = 3; // Extract + insert + copy.

  // Multiply by the number of parts.
  return Cost * LT.first;
}
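
// Note on the reverse-shuffle estimate above: reversing a legal 256-bit type
// such as <4 x i64> on AVX is charged 3 (extract + insert + copy), while a
// type like <8 x i64> that legalizes to two such registers is charged
// 3 * 2 = 6 via LT.first.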

unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
      ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 4 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64, 4*4 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64, 1 },
    { ISD::SETCC, MVT::v4f32, 1 },
    { ISD::SETCC, MVT::v2i64, 1 },
    { ISD::SETCC, MVT::v4i32, 1 },
    { ISD::SETCC, MVT::v8i16, 1 },
    { ISD::SETCC, MVT::v16i8, 1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
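
// Example for the tables above: with AVX1 only, an 'icmp slt <8 x i32>' has
// no native 256-bit integer compare, so it is modeled as two 128-bit halves
// plus extract/insert glue (cost 4); AVX2 compares the full width in one
// instruction (cost 1).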

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}
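
// Example for the early-exit above: 'extractelement <4 x float> %v, i32 0' is
// free because the scalar already lives in element #0 of the XMM register;
// any other lane falls through to the generic estimate.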

unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
                                                           VTy->getScalarType(),
                                                           Alignment,
                                                           AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}

unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
                                  bool IsPairwise) const {

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}

unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TCC_Free;

  if (Imm == 0)
    return TCC_Free;

  if (Imm.getBitWidth() <= 64 &&
      (isInt<32>(Imm.getSExtValue()) || isUInt<32>(Imm.getZExtValue())))
    return TCC_Basic;
  else
    return 2 * TCC_Basic;
}
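
// Example: an immediate such as 0x1234 sign- or zero-extends from 32 bits and
// costs one basic instruction to materialize, while a full 64-bit value like
// 0x123456789ABCDEF0 is charged 2 * TCC_Basic, roughly modeling the more
// expensive movabsq-style materialization.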

unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if ((Idx == ImmIdx) &&
      Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
    return TCC_Free;

  return X86TTI::getIntImmCost(Imm, Ty);
}
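
// Example: in 'add i64 %a, 42' the immediate fits the instruction's imm32
// field, so operand 1 is TCC_Free and constant hoisting leaves it alone; an
// i64 immediate that does not fit 32 bits falls through to the generic
// immediate cost above and becomes a hoisting candidate.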

unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                               const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  switch (IID) {
  default: return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return X86TTI::getIntImmCost(Imm, Ty);
}