//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}
unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}
unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a constant power of two is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
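    // Illustrative per-lane sequence for element width W and divisor 2^k:
    //   t = x >>s (W - 1)   (SRA: broadcast the sign bit)
    //   t = t >>u (W - k)   (SRL: a (2^k - 1) bias for negative inputs)
    //   x = x + t           (ADD: round negative values toward zero)
    //   r = x >>s k         (SRA: the actual division)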
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them as
    // custom, in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
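      // e.g. shl <16 x i16> %x, <i16 1, i16 2, ...> becomes
      //      mul <16 x i16> %x, <i16 2, i16 4, ...>.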
      return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info);

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> XOPCostTable[] = {
    // 128-bit shifts take 1 cycle, but right shifts require negating the
    // shift amount beforehand.
    { ISD::SHL,  MVT::v16i8,   1 },
    { ISD::SRL,  MVT::v16i8,   2 },
    { ISD::SRA,  MVT::v16i8,   2 },
    { ISD::SHL,  MVT::v8i16,   1 },
    { ISD::SRL,  MVT::v8i16,   2 },
    { ISD::SRA,  MVT::v8i16,   2 },
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   2 },
    { ISD::SRA,  MVT::v4i32,   2 },
    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   2 },
    { ISD::SRA,  MVT::v2i64,   2 },
    // 256-bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,   2 },
    { ISD::SRL,  MVT::v32i8,   4 },
    { ISD::SRA,  MVT::v32i8,   4 },
    { ISD::SHL,  MVT::v16i16,  2 },
    { ISD::SRL,  MVT::v16i16,  4 },
    { ISD::SRA,  MVT::v16i16,  4 },
    { ISD::SHL,  MVT::v8i32,   2 },
    { ISD::SRL,  MVT::v8i32,   4 },
    { ISD::SRA,  MVT::v8i32,   4 },
    { ISD::SHL,  MVT::v4i64,   2 },
    { ISD::SRL,  MVT::v4i64,   4 },
    { ISD::SRA,  MVT::v4i64,   4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    int Idx = CostTableLookup(XOPCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * XOPCostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,     24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,    10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,      4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,      4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CustomCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CustomCostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }
282 if (ISD == ISD::SHL &&
283 Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
285 if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
286 (VT == MVT::v4i32 && ST->hasSSE41()))
287 // Vector shift left by non uniform constant can be lowered
288 // into vector multiply (pmullw/pmulld).
290 if (VT == MVT::v4i32 && ST->hasSSE2())
291 // A vector shift left by non uniform constant is converted
292 // into a vector multiply; the new multiply is eventually
293 // lowered into a sequence of shuffles and 2 x pmuludq.
  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,   26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,   32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,  2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,    4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,    8 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,   26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,   32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,   16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,    4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,   54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,   32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,   16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,   12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide
    // "20 cycles" for each lane.
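    // Hence each division entry below charges (number of lanes) x 20.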
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
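    // That is, 2 x (3 multiplies + 4 shifts + 2 adds) = 18.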
    { ISD::MUL,  MVT::v4i64,  18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }
  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,  MVT::v2i64,  9 },
    { ISD::MUL,  MVT::v4i64,  9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;
  // Special lowering of v4i32 mul on SSE2/SSE3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
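  // An alternate shuffle takes its even elements from one source vector and
  // its odd elements from the other, e.g. mask <0, 5, 2, 7> for 4 elements.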
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }
  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //  2x vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //  2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };
    if (ST->hasAVX()) {
      int Idx =
          CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
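      // Each pshufb selects the lanes taken from one source (zeroing the
      // rest), and the final 'or' merges the two partial results.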
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx =
          CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx =
          CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 x pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP, MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP, MVT::v8f64,  MVT::v8i32,  1 },
  };
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64, 3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 8 },
  };
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
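    // e.g. v2i64 -> v2f64 is charged 2 x 10 and v4i64 -> v4f64 4 x 10 below.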
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic when estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
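    // i.e. (3 + 1) = 4 per element, giving the 8 x 4 and 4 x 4 entries below.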
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64, 4*4 },
  };
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };
  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }
  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64,  1 },
    { ISD::SETCC, MVT::v4f32,  1 },
    { ISD::SETCC, MVT::v2i64,  1 },
    { ISD::SETCC, MVT::v4i32,  1 },
    { ISD::SETCC, MVT::v8i16,  1 },
    { ISD::SETCC, MVT::v16i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;
  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
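  // e.g. a single 256-bit load costs 2 on Sandy Bridge, but 1 on Haswell.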
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}
int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar, take the regular memory cost; there is no mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int Cost = 0;
  if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}
int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}
int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
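  // A pairwise reduction combines adjacent lanes at each step (haddps-style),
  // while the non-pairwise form repeatedly folds the upper half of the vector
  // onto the lower half.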
  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };
  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;
  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
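  // e.g. an i96 immediate is sign-extended to i128 and then costed as two
  // 64-bit chunks by the loop below.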
  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32 bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
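    // e.g. (and i64 %x, 0x80000000) can be encoded as 'andl $0x80000000,
    // %eax', since a write to EAX implicitly zero-extends into RAX, whereas
    // the 64-bit form would sign-extend the 32-bit immediate.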
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }
  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
  int DataWidth = DataTy->getPrimitiveSizeInBits();

  // TODO: AVX512 allows gather/scatter, which works with strided and random
  // access patterns as well.
  if ((DataWidth < 32) || (Consecutive == 0))
    return false;
  if (ST->hasAVX512() || ST->hasAVX2())
    return true;
  return false;
}
bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
  return isLegalMaskedLoad(DataType, Consecutive);
}
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}