//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
#include "X86.h"
#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "x86tti"
// Declare the pass initialization routine locally as target-specific passes
// don't have a target-wide initialization entry point, and so we rely on the
// pass constructor initialization.
namespace llvm {
void initializeX86TTIPass(PassRegistry &);
}

namespace {

class X86TTI final : public ImmutablePass, public TargetTransformInfo {
  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  /// Estimate the overhead of scalarizing an instruction. Insert and Extract
  /// are set if the result needs to be inserted and/or extracted from vectors.
  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) const;

public:
  X86TTI() : ImmutablePass(ID), ST(nullptr), TLI(nullptr) {
    llvm_unreachable("This pass cannot be directly constructed");
  }

  X86TTI(const X86TargetMachine *TM)
      : ImmutablePass(ID), ST(TM->getSubtargetImpl()),
        TLI(TM->getSubtargetImpl()->getTargetLowering()) {
    initializeX86TTIPass(*PassRegistry::getPassRegistry());
  }
  void initializePass() override {
    pushTTIStack(this);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    TargetTransformInfo::getAnalysisUsage(AU);
  }

  /// Pass identification.
  static char ID;

  /// Provide necessary pointer adjustments for the two base classes.
  void *getAdjustedAnalysisPointer(const void *ID) override {
    if (ID == &TargetTransformInfo::ID)
      return (TargetTransformInfo*)this;
    return this;
  }
  /// \name Scalar TTI Implementations
  /// @{
  PopcntSupportKind getPopcntSupport(unsigned TyWidth) const override;

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector) const override;
  unsigned getRegisterBitWidth(bool Vector) const override;
  unsigned getMaxInterleaveFactor() const override;
  unsigned getArithmeticInstrCost(unsigned Opcode, Type *Ty, OperandValueKind,
                                  OperandValueKind, OperandValueProperties,
                                  OperandValueProperties) const override;
  unsigned getShuffleCost(ShuffleKind Kind, Type *Tp,
                          int Index, Type *SubTp) const override;
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst,
                            Type *Src) const override;
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                              Type *CondTy) const override;
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val,
                              unsigned Index) const override;
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace) const override;

  unsigned getAddressComputationCost(Type *PtrTy,
                                     bool IsComplex) const override;

  unsigned getReductionCost(unsigned Opcode, Type *Ty,
                            bool IsPairwiseForm) const override;

  unsigned getIntImmCost(int64_t) const;

  unsigned getIntImmCost(const APInt &Imm, Type *Ty) const override;

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty) const override;

  /// @}
};

} // end anonymous namespace
INITIALIZE_AG_PASS(X86TTI, TargetTransformInfo, "x86tti",
                   "X86 Target Transform Info", true, true, false)
char X86TTI::ID = 0;

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new X86TTI(TM);
}
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
X86TTI::PopcntSupportKind X86TTI::getPopcntSupport(unsigned TyWidth) const {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? PSK_FastHardware : PSK_Software;
}
unsigned X86TTI::getNumberOfRegisters(bool Vector) const {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}
unsigned X86TTI::getRegisterBitWidth(bool Vector) const {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}
unsigned X86TTI::getMaxInterleaveFactor() const {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
unsigned X86TTI::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, OperandValueKind Op1Info,
    OperandValueKind Op2Info, OperandValueProperties Opd1PropInfo,
    OperandValueProperties Opd2PropInfo) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);
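  // Note: LT.first is the number of legal-typed pieces Ty splits into and
  // LT.second is the legalized type. For example, on an SSE2-only target a
  // v8i32 legalizes to two v4i32 halves, so LT.first == 2 and every per-type
  // cost below is scaled by that split factor.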
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    unsigned Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
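    // For instance, a uniform 'sdiv <4 x i32> %x, 4' is costed as the
    // expanded psrad + psrld + paddd + psrad sequence rather than as a
    // hardware divide.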
    return Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,     MVT::v16i32,   1 },
    { ISD::SRL,     MVT::v16i32,   1 },
    { ISD::SRA,     MVT::v16i32,   1 },
    { ISD::SHL,     MVT::v8i64,    1 },
    { ISD::SRL,     MVT::v8i64,    1 },
    { ISD::SRA,     MVT::v8i64,    1 },
  };
  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
    // as custom in order to detect the cases where the shift amount is a
    // scalar.
    { ISD::SHL,     MVT::v4i32,    1 },
    { ISD::SRL,     MVT::v4i32,    1 },
    { ISD::SRA,     MVT::v4i32,    1 },
    { ISD::SHL,     MVT::v8i32,    1 },
    { ISD::SRL,     MVT::v8i32,    1 },
    { ISD::SRA,     MVT::v8i32,    1 },
    { ISD::SHL,     MVT::v2i64,    1 },
    { ISD::SRL,     MVT::v2i64,    1 },
    { ISD::SHL,     MVT::v4i64,    1 },
    { ISD::SRL,     MVT::v4i64,    1 },

    { ISD::SHL,  MVT::v32i8,      42 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v16i16,  16*10 }, // Scalarized.

    { ISD::SRL,  MVT::v32i8,   32*10 }, // Scalarized.
    { ISD::SRL,  MVT::v16i16,   8*10 }, // Scalarized.

    { ISD::SRA,  MVT::v32i8,   32*10 }, // Scalarized.
    { ISD::SRA,  MVT::v16i16,  16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i64,    4*10 }, // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV,  MVT::v32i8,  32*20 },
    { ISD::SDIV,  MVT::v16i16, 16*20 },
    { ISD::SDIV,  MVT::v8i32,   8*20 },
    { ISD::SDIV,  MVT::v4i64,   4*20 },
    { ISD::UDIV,  MVT::v32i8,  32*20 },
    { ISD::UDIV,  MVT::v16i16, 16*20 },
    { ISD::UDIV,  MVT::v8i32,   8*20 },
    { ISD::UDIV,  MVT::v4i64,   4*20 },
  };
  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence.
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence.
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence.
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence.
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }
  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // A vector shift left by a non-uniform constant can be lowered
      // into a vector multiply (pmullw/pmulld).
      return LT.first;
    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }
  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,    30 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.

    { ISD::SRL,  MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.

    { ISD::SRA,  MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyway, so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV,  MVT::v16i8,  16*20 },
    { ISD::SDIV,  MVT::v8i16,   8*20 },
    { ISD::SDIV,  MVT::v4i32,   4*20 },
    { ISD::SDIV,  MVT::v2i64,   2*20 },
    { ISD::UDIV,  MVT::v16i8,  16*20 },
    { ISD::UDIV,  MVT::v8i16,   8*20 },
    { ISD::UDIV,  MVT::v4i32,   4*20 },
    { ISD::UDIV,  MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,   4 },
    { ISD::MUL,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v8i32,    4 },
    { ISD::ADD,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v4i64,    4 },
    { ISD::ADD,     MVT::v4i64,    4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,     MVT::v4i64,   18 },
  };
  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiplies + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }
  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,     MVT::v2i64,    9 },
    { ISD::MUL,     MVT::v4i64,    9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;
  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return TargetTransformInfo::getArithmeticInstrCost(Opcode, Ty, Op1Info,
                                                     Op2Info);
}
unsigned X86TTI::getShuffleCost(ShuffleKind Kind, Type *Tp, int Index,
                                Type *SubTp) const {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != SK_Reverse && Kind != SK_Alternate)
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == SK_Reverse) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
    unsigned Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.
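    // For example, reversing a v8f32 on an AVX target stays a single 256-bit
    // part (LT.first == 1) that is wider than 128 bits, so the reverse is
    // costed as 3.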
    // Multiply by the number of parts.
    return Cost * LT.first;
  }
  if (Kind == SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;
    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //   2x vextractf128, 2x vpblendw, 1x vinsertf128.
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //   2x vextractf128, 4x vpshufb, 2x vpor, 1x vinsertf128.
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extracts + four inserts.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 x pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return TargetTransformInfo::getShuffleCost(Kind, Tp, Index, SubTp);
}
unsigned X86TTI::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) const {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure that, when we take
    // legalization into account, the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,  8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
  };

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },

    { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64, 4 },

    { ISD::FP_EXTEND,   MVT::v8f64, MVT::v8f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32, MVT::v8f64, 3 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 8 },
  };
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic in estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain, so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
  };
  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCastInstrCost(Opcode, Dst, Src);
}
unsigned X86TTI::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                    Type *CondTy) const {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,   MVT::v2f64,   1 },
    { ISD::SETCC,   MVT::v4f32,   1 },
    { ISD::SETCC,   MVT::v2i64,   1 },
    { ISD::SETCC,   MVT::v4i32,   1 },
    { ISD::SETCC,   MVT::v8i16,   1 },
    { ISD::SETCC,   MVT::v16i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,   MVT::v4f64,   1 },
    { ISD::SETCC,   MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,   MVT::v4i64,   4 },
    { ISD::SETCC,   MVT::v8i32,   4 },
    { ISD::SETCC,   MVT::v16i16,  4 },
    { ISD::SETCC,   MVT::v32i8,   4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,   MVT::v4i64,   1 },
    { ISD::SETCC,   MVT::v8i32,   1 },
    { ISD::SETCC,   MVT::v16i16,  1 },
    { ISD::SETCC,   MVT::v32i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC,   MVT::v8i64,   1 },
    { ISD::SETCC,   MVT::v16i32,  1 },
    { ISD::SETCC,   MVT::v8f64,   1 },
    { ISD::SETCC,   MVT::v16f32,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return TargetTransformInfo::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
unsigned X86TTI::getVectorInstrCost(unsigned Opcode, Type *Val,
                                    unsigned Index) const {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;
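    // For example, extracting element 5 of a v8i32 that was split into two
    // v4i32 halves becomes an extract of element 1 from the second half.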
    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return TargetTransformInfo::getVectorInstrCost(Opcode, Val, Index);
}
unsigned X86TTI::getScalarizationOverhead(Type *Ty, bool Insert,
                                          bool Extract) const {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += TopTTI->getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += TopTTI->getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
unsigned X86TTI::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace) const {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = TargetTransformInfo::getMemoryOpCost(Opcode,
                                                           VTy->getScalarType(),
                                                           Alignment,
                                                           AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}
unsigned X86TTI::getAddressComputationCost(Type *Ty, bool IsComplex) const {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return TargetTransformInfo::getAddressComputationCost(Ty, IsComplex);
}
unsigned X86TTI::getReductionCost(unsigned Opcode, Type *ValTy,
                                  bool IsPairwise) const {

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },  // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 },  // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 },  // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },  // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 },  // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   3 },
    { ISD::FADD,  MVT::v4f64,   3 },
    { ISD::FADD,  MVT::v8f32,   4 },
    { ISD::ADD,   MVT::v2i64,   1 },  // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },  // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,   3 },
    { ISD::ADD,   MVT::v8i16,   4 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return TargetTransformInfo::getReductionCost(Opcode, ValTy, IsPairwise);
}
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
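/// For example, materializing 0 is free (a zeroing idiom), a value that fits
/// a sign-extended 32-bit immediate costs one basic instruction, and a full
/// 64-bit immediate (movabsq) is costed as two.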
unsigned X86TTI::getIntImmCost(int64_t Val) const {
  if (Val == 0)
    return TCC_Free;

  if (isInt<32>(Val))
    return TCC_Basic;

  return 2 * TCC_Basic;
}
unsigned X86TTI::getIntImmCost(const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TCC_Free;

  if (Imm == 0)
    return TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
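  // For example, an i96 constant is sign-extended to i128 here and then
  // costed as two 64-bit chunks by the loop below.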
  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}
unsigned X86TTI::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                               Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default: return TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TCC_Basic;
    return TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = X86TTI::getIntImmCost(Imm, Ty);
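    // A constant that is no more expensive than the instructions using it is
    // reported as TCC_Free, so constant hoisting leaves it in place; only
    // immediates that are genuinely costly to materialize (e.g. wider than
    // imm32) keep a non-free cost and become hoisting candidates.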
    return (Cost <= NumConstants * TCC_Basic)
               ? static_cast<unsigned>(TCC_Free)
               : Cost;
  }

  return X86TTI::getIntImmCost(Imm, Ty);
}
unsigned X86TTI::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                               const APInt &Imm, Type *Ty) const {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TCC_Free;

  switch (IID) {
  default: return TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TCC_Free;
    break;
  }
  return X86TTI::getIntImmCost(Imm, Ty);
}