//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetMachine.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"
using namespace llvm;

#define DEBUG_TYPE "x86tti"

namespace {

class X86TTIImpl : public BasicTTIImplBase<X86TTIImpl> {
  typedef BasicTTIImplBase<X86TTIImpl> BaseT;
  typedef TargetTransformInfo TTI;

  const X86Subtarget *ST;
  const X86TargetLowering *TLI;

  unsigned getScalarizationOverhead(Type *Ty, bool Insert, bool Extract);

public:
  explicit X86TTIImpl(const X86TargetMachine *TM = nullptr)
      : BaseT(TM), ST(TM ? TM->getSubtargetImpl() : nullptr),
        TLI(ST ? ST->getTargetLowering() : nullptr) {}

  // Provide value semantics. MSVC requires that we spell all of these out.
  X86TTIImpl(const X86TTIImpl &Arg)
      : BaseT(static_cast<const BaseT &>(Arg)), ST(Arg.ST), TLI(Arg.TLI) {}
  X86TTIImpl(X86TTIImpl &&Arg)
      : BaseT(std::move(static_cast<BaseT &>(Arg))), ST(std::move(Arg.ST)),
        TLI(std::move(Arg.TLI)) {}
  X86TTIImpl &operator=(const X86TTIImpl &RHS) {
    BaseT::operator=(static_cast<const BaseT &>(RHS));
    ST = RHS.ST;
    TLI = RHS.TLI;
    return *this;
  }
  X86TTIImpl &operator=(X86TTIImpl &&RHS) {
    BaseT::operator=(std::move(static_cast<BaseT &>(RHS)));
    ST = std::move(RHS.ST);
    TLI = std::move(RHS.TLI);
    return *this;
  }

  /// \name Scalar TTI Implementations
  /// @{

  TTI::PopcntSupportKind getPopcntSupport(unsigned TyWidth);

  /// @}

  /// \name Vector TTI Implementations
  /// @{

  unsigned getNumberOfRegisters(bool Vector);
  unsigned getRegisterBitWidth(bool Vector);
  unsigned getMaxInterleaveFactor();
  unsigned getArithmeticInstrCost(
      unsigned Opcode, Type *Ty,
      TTI::OperandValueKind Opd1Info = TTI::OK_AnyValue,
      TTI::OperandValueKind Opd2Info = TTI::OK_AnyValue,
      TTI::OperandValueProperties Opd1PropInfo = TTI::OP_None,
      TTI::OperandValueProperties Opd2PropInfo = TTI::OP_None);
  unsigned getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                          Type *SubTp);
  unsigned getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src);
  unsigned getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy);
  unsigned getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index);
  unsigned getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                           unsigned AddressSpace);
  unsigned getMaskedMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                 unsigned AddressSpace);

  unsigned getAddressComputationCost(Type *PtrTy, bool IsComplex);

  unsigned getReductionCost(unsigned Opcode, Type *Ty, bool IsPairwiseForm);

  unsigned getIntImmCost(int64_t);

  unsigned getIntImmCost(const APInt &Imm, Type *Ty);

  unsigned getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                         Type *Ty);
  unsigned getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                         Type *Ty);
  bool isLegalMaskedLoad(Type *DataType, int Consecutive);
  bool isLegalMaskedStore(Type *DataType, int Consecutive);

  /// @}
};

} // end anonymous namespace

ImmutablePass *
llvm::createX86TargetTransformInfoPass(const X86TargetMachine *TM) {
  return new TargetTransformInfoWrapperPass(X86TTIImpl(TM));
}

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor() {
  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

unsigned X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    unsigned Cost =
        2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,   MVT::v16i32,   1 },
    { ISD::SRL,   MVT::v16i32,   1 },
    { ISD::SRA,   MVT::v16i32,   1 },
    { ISD::SHL,   MVT::v8i64,    1 },
    { ISD::SRL,   MVT::v8i64,    1 },
    { ISD::SRA,   MVT::v8i64,    1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them as
    // custom; the custom lowering only detects the cases where the shift
    // amount is a scalar.
    { ISD::SHL,   MVT::v4i32,    1 },
    { ISD::SRL,   MVT::v4i32,    1 },
    { ISD::SRA,   MVT::v4i32,    1 },
    { ISD::SHL,   MVT::v8i32,    1 },
    { ISD::SRL,   MVT::v8i32,    1 },
    { ISD::SRA,   MVT::v8i32,    1 },
    { ISD::SHL,   MVT::v2i64,    1 },
    { ISD::SRL,   MVT::v2i64,    1 },
    { ISD::SHL,   MVT::v4i64,    1 },
    { ISD::SRL,   MVT::v4i64,    1 },

    { ISD::SHL,   MVT::v32i8,      42 }, // cmpeqb sequence.
    { ISD::SHL,   MVT::v16i16,  16*10 }, // Scalarized.

    { ISD::SRL,   MVT::v32i8,   32*10 }, // Scalarized.
    { ISD::SRL,   MVT::v16i16,   8*10 }, // Scalarized.

    { ISD::SRA,   MVT::v32i8,   32*10 }, // Scalarized.
    { ISD::SRA,   MVT::v16i16,  16*10 }, // Scalarized.
    { ISD::SRA,   MVT::v4i64,    4*10 }, // Scalarized.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV,  MVT::v32i8,   32*20 },
    { ISD::SDIV,  MVT::v16i16,  16*20 },
    { ISD::SDIV,  MVT::v8i32,    8*20 },
    { ISD::SDIV,  MVT::v4i64,    4*20 },
    { ISD::UDIV,  MVT::v32i8,   32*20 },
    { ISD::UDIV,  MVT::v16i16,  16*20 },
    { ISD::UDIV,  MVT::v8i32,    8*20 },
    { ISD::UDIV,  MVT::v4i64,    4*20 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info);

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // A vector shift left by a non-uniform constant can be lowered
      // into a vector multiply (pmullw/pmulld).
      return LT.first;

    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,    30 }, // cmpeqb sequence.
    { ISD::SHL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,  2*10 }, // Scalarized.
    { ISD::SHL,  MVT::v4i64,  4*10 }, // Scalarized.

    { ISD::SRL,  MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRL,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRL,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRL,  MVT::v2i64,  2*10 }, // Scalarized.

    { ISD::SRA,  MVT::v16i8, 16*10 }, // Scalarized.
    { ISD::SRA,  MVT::v8i16,  8*10 }, // Scalarized.
    { ISD::SRA,  MVT::v4i32,  4*10 }, // Scalarized.
    { ISD::SRA,  MVT::v2i64,  2*10 }, // Scalarized.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV,  MVT::v16i8,  16*20 },
    { ISD::SDIV,  MVT::v8i16,   8*20 },
    { ISD::SDIV,  MVT::v4i32,   4*20 },
    { ISD::SDIV,  MVT::v2i64,   2*20 },
    { ISD::UDIV,  MVT::v16i8,  16*20 },
    { ISD::UDIV,  MVT::v8i16,   8*20 },
    { ISD::UDIV,  MVT::v4i32,   4*20 },
    { ISD::UDIV,  MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,   MVT::v16i16,   4 },
    { ISD::MUL,   MVT::v8i32,    4 },
    { ISD::SUB,   MVT::v8i32,    4 },
    { ISD::ADD,   MVT::v8i32,    4 },
    { ISD::SUB,   MVT::v4i64,    4 },
    { ISD::ADD,   MVT::v4i64,    4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,   MVT::v4i64,   18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,   MVT::v2i64,    9 },
    { ISD::MUL,   MVT::v4i64,    9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

unsigned X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                                    Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);
    unsigned Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //   2x vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //   2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

unsigned X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  std::pair<unsigned, MVT> LTSrc = TLI->getTypeLegalizationCost(Src);
  std::pair<unsigned, MVT> LTDest = TLI->getTypeLegalizationCost(Dst);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,   MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,   MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32,  MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,   MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,   MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,   MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32,  MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(Src);
  EVT DstTy = TLI->getValueType(Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
  };

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

unsigned X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy,
                                        Type *CondTy) {
  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,   MVT::v2f64,   1 },
    { ISD::SETCC,   MVT::v4f32,   1 },
    { ISD::SETCC,   MVT::v2i64,   1 },
    { ISD::SETCC,   MVT::v4i32,   1 },
    { ISD::SETCC,   MVT::v8i16,   1 },
    { ISD::SETCC,   MVT::v16i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,   MVT::v4f64,   1 },
    { ISD::SETCC,   MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,   MVT::v4i64,   4 },
    { ISD::SETCC,   MVT::v8i32,   4 },
    { ISD::SETCC,   MVT::v16i16,  4 },
    { ISD::SETCC,   MVT::v32i8,   4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,   MVT::v4i64,   1 },
    { ISD::SETCC,   MVT::v8i32,   1 },
    { ISD::SETCC,   MVT::v16i16,  1 },
    { ISD::SETCC,   MVT::v32i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC,   MVT::v8i64,   1 },
    { ISD::SETCC,   MVT::v16i32,  1 },
    { ISD::SETCC,   MVT::v8f64,   1 },
    { ISD::SETCC,   MVT::v16f32,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

unsigned X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val,
                                        unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

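/// Estimate the cost of fully scalarizing a vector value: one insertelement
/// per element when the vector has to be (re)assembled, plus one
/// extractelement per element when its lanes have to be read individually.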
unsigned X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert,
                                              bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  unsigned Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

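/// Estimate the cost of a scalar or vector load/store. <3 x float> and
/// <3 x double> accesses get small fixed costs, other non-power-of-two element
/// counts are costed as fully scalarized accesses, and legal vector types cost
/// one unit per legalized part (doubled for 256-bit accesses without AVX2).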
unsigned X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src,
                                     unsigned Alignment,
                                     unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      unsigned Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(),
                                             Alignment, AddressSpace);
      unsigned SplitCost = getScalarizationOverhead(Src,
                                                    Opcode == Instruction::Load,
                                                    Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  unsigned Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

unsigned X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                           unsigned Alignment,
                                           unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate the scalar cost, take the regular cost without the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization
    unsigned MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    unsigned ScalarCompareCost =
        getCmpSelInstrCost(Instruction::ICmp,
                           Type::getInt8Ty(getGlobalContext()), NULL);
    unsigned BranchCost = getCFInstrCost(Instruction::Br);
    unsigned MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    unsigned ValueSplitCost =
        getScalarizationOverhead(SrcVTy, Opcode == Instruction::Load,
                                 Opcode == Instruction::Store);
    unsigned MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(SrcVTy);
  unsigned Cost = 0;
  if (LT.second != TLI->getValueType(SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

unsigned X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

unsigned X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                      bool IsPairwise) {

  std::pair<unsigned, MVT> LT = TLI->getTypeLegalizationCost(ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 },      // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 },      // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 },      // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   3 },
    { ISD::FADD,  MVT::v4f64,   3 },
    { ISD::FADD,  MVT::v8f32,   4 },
    { ISD::ADD,   MVT::v2i64,   1 },      // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 },      // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,   3 },
    { ISD::ADD,   MVT::v8i16,   4 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
unsigned X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

unsigned X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  unsigned Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1U, Cost);
}

unsigned X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx,
                                   const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    unsigned NumConstants = (BitSize + 63) / 64;
    unsigned Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<unsigned>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

unsigned X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx,
                                   const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

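// Masked loads/stores are only reported as legal when the target has AVX2
// (vpmaskmov) or AVX-512, the data is at least 32 bits wide, and the access
// is consecutive; getMaskedMemoryOpCost scalarizes every other case.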
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
  int DataWidth = DataTy->getPrimitiveSizeInBits();

  // TODO: AVX512 allows gather/scatter, works with strided and random as well.
  if ((DataWidth < 32) || (Consecutive == 0))
    return false;
  if (ST->hasAVX512() || ST->hasAVX2())
    return true;
  return false;
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
  return isLegalMaskedLoad(DataType, Consecutive);
}