//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "x86tti"
#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI final : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(0), TLI(0) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }

  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }

  /// \name Scalar TTI Implementations
  /// @{
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getMaximumUnrollFactor() const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getAddressComputationCost(Type *PtrTy,
                                     bool IsComplex) const override;

  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwiseForm) const override;

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;

  /// @}
};

} // end anonymous namespace

INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}

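// The register counts below are architectural: 16 GPRs/vector registers in
// 64-bit mode, 8 in 32-bit mode. A vector query on a subtarget without any
// SSE support reports zero usable vector registers.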
unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit())
    return 16;
  return 8;
}

unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

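// This caps the loop vectorizer's interleave (unroll) factor. In-order Atom
// cores get no interleaving; AVX-class cores, which have more parallel
// execution resources, get a higher factor than older SSE-only cores.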
unsigned X86TTI::getMaximumUnrollFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

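// Arithmetic costs are looked up by (ISD opcode, legalized MVT). Each table
// entry is the cost of one legalized part, so results are scaled by LT.first,
// the number of registers the original type is split into.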
unsigned X86TTI::getArithmeticInstrCost(unsigned Opcode, Type *Ty,
                                        OperandValueKind Op1Info,
                                        OperandValueKind Op2Info) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them as
    // custom; the custom lowering catches the cases where the shift amount is
    // a scalar.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },

    { ISD::SHL,  MVT::v32i8,   42 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16,  16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,   32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16,  8*10 },  // Scalarized.

    { ISD::SRA,  MVT::v32i8,   32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,   4*10 },  // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV,  MVT::v32i8,   32*20 },
    { ISD::SDIV,  MVT::v16i16,  16*20 },
    { ISD::SDIV,  MVT::v8i32,   8*20 },
    { ISD::SDIV,  MVT::v4i64,   4*20 },
    { ISD::UDIV,  MVT::v32i8,   32*20 },
    { ISD::UDIV,  MVT::v16i16,  16*20 },
    { ISD::UDIV,  MVT::v8i32,   8*20 },
    { ISD::UDIV,  MVT::v4i64,   4*20 },
  };
  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }
  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // Vector shift left by non-uniform constant can be lowered
      // into vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }
  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,  30 },    // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SHL,  MVT::v4i32,  2*5 },   // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 },  // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 },  // Scalarized.

    { ISD::SRL,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 },  // Scalarized.

    { ISD::SRA,  MVT::v16i8,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 },  // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 },  // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 },  // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV,  MVT::v16i8,  16*20 },
    { ISD::SDIV,  MVT::v8i16,  8*20 },
    { ISD::SDIV,  MVT::v4i32,  4*20 },
    { ISD::SDIV,  MVT::v2i64,  2*20 },
    { ISD::UDIV,  MVT::v16i8,  16*20 },
    { ISD::UDIV,  MVT::v8i16,  8*20 },
    { ISD::UDIV,  MVT::v4i32,  4*20 },
    { ISD::UDIV,  MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64,   18 },
  };
  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }
  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,  MVT::v2i64,  9 },
    { ISD::MUL,  MVT::v4i64,  9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}

unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse shuffles.
  if (Kind != SK_Reverse)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
  unsigned Cost = 1;
  if (LT.second.getSizeInBits() > 128)
    Cost = 3; // Extract + insert + copy.

  // Multiply by the number of parts.
  return Cost * LT.first;
}

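// Cast costs are looked up by (ISD opcode, destination type, source type).
// The entries roughly track the instruction sequences the backend emits for
// each extend/truncate/int-fp conversion on the given subtarget.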
unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8,  16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8,  16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64,  15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,  15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16,  15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64,  15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32,  15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16,  15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }
  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },
  };
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  1 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,  MVT::v2f64,  1 },
    { ISD::SETCC,  MVT::v4f32,  1 },
    { ISD::SETCC,  MVT::v2i64,  1 },
    { ISD::SETCC,  MVT::v4i32,  1 },
    { ISD::SETCC,  MVT::v8i16,  1 },
    { ISD::SETCC,  MVT::v16i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,  MVT::v4f64,   1 },
    { ISD::SETCC,  MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,  MVT::v4i64,   4 },
    { ISD::SETCC,  MVT::v8i32,   4 },
    { ISD::SETCC,  MVT::v16i16,  4 },
    { ISD::SETCC,  MVT::v32i8,   4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,  MVT::v4i64,   1 },
    { ISD::SETCC,  MVT::v8i32,   1 },
    { ISD::SETCC,  MVT::v16i16,  1 },
    { ISD::SETCC,  MVT::v32i8,   1 },
  };

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}

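// Sums the per-lane insert/extract cost over all vector elements. The calls
// are routed through TopTTI rather than made directly so that whichever TTI
// implementation sits on top of the analysis stack answers them.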
unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
                                                           VTy->getScalarType(),
                                                           Alignment,
                                                           AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}

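// The reduction tables below distinguish the two shapes the vectorizer can
// emit: the pairwise form shuffles adjacent elements together on each step,
// while the non-pairwise form repeatedly splits the vector in half.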
unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
                                  bool IsPairwise) const {

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,  2 },
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::ADD,   MVT::v2i64,  2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,  5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::FADD,  MVT::v4f64,  5 },
    { ISD::FADD,  MVT::v8f32,  7 },
    { ISD::ADD,   MVT::v2i64,  1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,  5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,  5 },
    { ISD::ADD,   MVT::v8i32,  5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,  2 },
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::ADD,   MVT::v2i64,  2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,  4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,  3 },
    { ISD::FADD,  MVT::v4f64,  3 },
    { ISD::FADD,  MVT::v8f32,  4 },
    { ISD::ADD,   MVT::v2i64,  1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,  3 },
    { ISD::ADD,   MVT::v8i16,  4 },
    { ISD::ADD,   MVT::v8i32,  5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}

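// Most x86 instructions can only encode immediates that fit in 32 bits
// (sign- or zero-extended to the operand width); larger constants must be
// materialized separately (e.g. movabsq), hence the doubled cost below.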
unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  if (Imm == 0)
    return TCC_Free;

  if (Imm.getBitWidth() <= 64 &&
      (isInt<32>(Imm.getSExtValue()) || isUInt<32>(Imm.getZExtValue())))
    return TCC_Basic;
  return 2 * TCC_Basic;
}

unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if ((Idx == ImmIdx) &&
      Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
    return TCC_Free;

  return X86TTI::getIntImmCost(Imm, Ty);
}

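// Immediates used by stackmap/patchpoint intrinsics end up in a side table
// instead of being materialized, so they are treated as free; the
// *_with_overflow intrinsics follow the usual 32-bit immediate rule.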
unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                               const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  switch (IID) {
  default: return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return X86TTI::getIntImmCost(Imm, Ty);
}