//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//

#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//

TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
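
// For reference: with +popcnt a scalar @llvm.ctpop.i32 lowers to a single
// "popcnt r32, r32". The software fallback is roughly the classic
// bit-twiddling expansion:
//   v = v - ((v >> 1) & 0x55555555);
//   v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
//   v = (v + (v >> 4)) & 0x0F0F0F0F;
//   c = (v * 0x01010101) >> 24;
// which is why transforms that form ctpop (e.g. the loop idiom recognizer)
// consult this hook first.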

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
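
// For example, on an AVX target a reduction loop such as
//   for (i = 0; i < n; ++i) sum += a[i];
// may be emitted with up to four independent vector accumulators to hide the
// latency of the chained adds, while VF == 1 leaves the loop to the regular
// unroller.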

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    return Cost;
  }
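
  // Schematically, for a v4i32 "x sdiv 8" the expansion costed above is:
  //   t0 = ashr x, 31   ; all-ones in negative lanes
  //   t1 = lshr t0, 29  ; bias of 2^3-1 for negative lanes, 0 otherwise
  //   t2 = add  x, t1   ; round towards zero
  //   r  = ashr t2, 3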

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them
    // custom in order to detect cases where the shift amount is a scalar.
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  1 },
    { ISD::SRA,  MVT::v4i32,  1 },
    { ISD::SHL,  MVT::v8i32,  1 },
    { ISD::SRL,  MVT::v8i32,  1 },
    { ISD::SRA,  MVT::v8i32,  1 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  1 },
    { ISD::SHL,  MVT::v4i64,  1 },
    { ISD::SRL,  MVT::v4i64,  1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> XOPCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,  MVT::v16i8,  1 },
    { ISD::SRL,  MVT::v16i8,  2 },
    { ISD::SRA,  MVT::v16i8,  2 },
    { ISD::SHL,  MVT::v8i16,  1 },
    { ISD::SRL,  MVT::v8i16,  2 },
    { ISD::SRA,  MVT::v8i16,  2 },
    { ISD::SHL,  MVT::v4i32,  1 },
    { ISD::SRL,  MVT::v4i32,  2 },
    { ISD::SRA,  MVT::v4i32,  2 },
    { ISD::SHL,  MVT::v2i64,  1 },
    { ISD::SRL,  MVT::v2i64,  2 },
    { ISD::SRA,  MVT::v2i64,  2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,  2 },
    { ISD::SRL,  MVT::v32i8,  4 },
    { ISD::SRA,  MVT::v32i8,  4 },
    { ISD::SHL,  MVT::v16i16, 2 },
    { ISD::SRL,  MVT::v16i16, 4 },
    { ISD::SRA,  MVT::v16i16, 4 },
    { ISD::SHL,  MVT::v8i32,  2 },
    { ISD::SRL,  MVT::v8i32,  4 },
    { ISD::SRA,  MVT::v8i32,  4 },
    { ISD::SHL,  MVT::v4i64,  2 },
    { ISD::SRL,  MVT::v4i64,  4 },
    { ISD::SRA,  MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    int Idx = CostTableLookup(XOPCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * XOPCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,      11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,      11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,      24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,     10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,       4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,       4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,   32*20 },
    { ISD::SDIV, MVT::v16i16,  16*20 },
    { ISD::SDIV, MVT::v8i32,    8*20 },
    { ISD::SDIV, MVT::v4i64,    4*20 },
    { ISD::UDIV, MVT::v32i8,   32*20 },
    { ISD::UDIV, MVT::v16i16,  16*20 },
    { ISD::UDIV, MVT::v8i32,    8*20 },
    { ISD::UDIV, MVT::v4i64,    4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CustomCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CustomCostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }
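
  // For reference, the "pmulhw sequence" entries above model division by a
  // uniform constant as a fixed-point multiply by a magic reciprocal
  // (roughly: q = mulhs(x, M) followed by shift/add fixups for rounding and
  // sign), since SSE has no per-lane integer divide.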

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      // A vector shift left by a non-uniform constant can be lowered
      // into a vector multiply (pmullw/pmulld).
      return LT.first;

    if (VT == MVT::v4i32 && ST->hasSSE2())
      // A vector shift left by a non-uniform constant is converted
      // into a vector multiply; the new multiply is eventually
      // lowered into a sequence of shuffles and 2 x pmuludq.
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,     8 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular registers.
    // The overhead of division is going to dominate most kernels anyway, so
    // try hard to prevent vectorization of division - it is generally a bad
    // idea. Assume somewhat arbitrarily that we have to be able to hide "20
    // cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,     MVT::v16i16,   4 },
    { ISD::MUL,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v8i32,    4 },
    { ISD::ADD,     MVT::v8i32,    4 },
    { ISD::SUB,     MVT::v4i64,    4 },
    { ISD::ADD,     MVT::v4i64,    4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,     MVT::v4i64,   18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if (ISD == ISD::SHL && (VT == MVT::v8i32 || VT == MVT::v16i16) &&
        Op2Info == TargetTransformInfo::OK_NonUniformConstantValue)
      ISD = ISD::MUL;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,     MVT::v2i64,    9 },
    { ISD::MUL,     MVT::v4i64,    9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
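
// Example query (a sketch only; the variable names are illustrative):
//   Type *V4I32 = VectorType::get(IntegerType::get(Ctx, 32), 4);
//   int C = TTI.getArithmeticInstrCost(Instruction::Mul, V4I32);
// On an SSE2-only subtarget this reports 6 for the shuffle/pmuludq lowering
// above; with SSE4.1 the pmulld-based lowering falls through to the cheaper
// default cost.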

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //  2x  vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //  2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx = CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx = CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE,
                                LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
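
// An "alternate" shuffle picks lanes from its two inputs in an even/odd
// pattern, e.g. the v4f32 mask <0, 5, 2, 7>. That is why a single
// blendps/vblendps covers it from SSE4.1 onwards, while plain SSE2 pays for
// a shufps + pshufd expansion (or considerably more for the narrow element
// types).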

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64, 3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 8 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64, 4*4 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
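
// For example, "zext <8 x i16> to <8 x i32>" is costed at 1 on AVX2 (a
// single vpmovzxwd from an XMM into a YMM register) but at 4 on AVX1, where
// the extend is split across two 128-bit halves and reassembled.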

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,   MVT::v2f64,   1 },
    { ISD::SETCC,   MVT::v4f32,   1 },
    { ISD::SETCC,   MVT::v2i64,   1 },
    { ISD::SETCC,   MVT::v4i32,   1 },
    { ISD::SETCC,   MVT::v8i16,   1 },
    { ISD::SETCC,   MVT::v16i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,   MVT::v4f64,   1 },
    { ISD::SETCC,   MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,   MVT::v4i64,   4 },
    { ISD::SETCC,   MVT::v8i32,   4 },
    { ISD::SETCC,   MVT::v16i16,  4 },
    { ISD::SETCC,   MVT::v32i8,   4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,   MVT::v4i64,   1 },
    { ISD::SETCC,   MVT::v8i32,   1 },
    { ISD::SETCC,   MVT::v16i16,  1 },
    { ISD::SETCC,   MVT::v32i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC,   MVT::v8i64,   1 },
    { ISD::SETCC,   MVT::v16i32,  1 },
    { ISD::SETCC,   MVT::v8f64,   1 },
    { ISD::SETCC,   MVT::v16f32,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
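
// For example, a compare of two <4 x i64> vectors costs 1 on AVX2 (one
// vpcmpgtq over the full YMM register) but 4 on AVX1, which has no 256-bit
// integer compare and must split into two 128-bit halves and recombine.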

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
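
// For example, extracting lane 5 of an <8 x float> on SSE (legalized to two
// XMM parts) normalizes to lane 1 of the second part and is not free, while
// extracting lane 0 or lane 4 is free: each half's element 0 already sits in
// the low position of its register.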

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
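
// For example, scalarizing a <4 x float> with both Insert and Extract set
// sums the cost of four insertelements and four extractelements; lane 0 of
// a floating point vector comes out free because it already lives at the
// bottom of an XMM register (see getVectorInstrCost above).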

int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}
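
// For example, a <3 x float> store is costed at 3 (a 64-bit store, an
// extract, and a 32-bit store, per the comment above), and an <8 x float>
// access on an AVX1-only Sandybridge is costed at 2 because the 256-bit
// access is double pumped.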

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate scalar take the regular cost, without mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy, 1)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy, 1)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int Cost = 0;
  if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, 0) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, 0);
  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}
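
// For example, a masked load of <8 x float> on AVX2 is legal (vmaskmovps)
// and costs 4 per legalized part, while a masked load of <16 x i8> fails
// the legality check (element width below 32 bits) and is charged the full
// scalarization cost: a per-lane compare + branch + scalar load, plus the
// cost of reassembling the vector value.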

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::FADD,  MVT::v4f64,   5 },
    { ISD::FADD,  MVT::v8f32,   7 },
    { ISD::ADD,   MVT::v2i64,   1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,   5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,   5 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,   2 },
    { ISD::FADD,  MVT::v4f32,   4 },
    { ISD::ADD,   MVT::v2i64,   2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,   3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,   4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,   3 },
    { ISD::FADD,  MVT::v4f64,   3 },
    { ISD::FADD,  MVT::v8f32,   4 },
    { ISD::ADD,   MVT::v2i64,   1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,   3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,   3 },
    { ISD::ADD,   MVT::v8i16,   4 },
    { ISD::ADD,   MVT::v8i32,   5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}
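
// The pairwise form of a reduction combines adjacent pairs at each step,
// e.g. (a0+a1, a2+a3) for v4f32, while the non-pairwise (splitting) form
// adds the high half of the vector to the low half; the IACA-derived tables
// above rate the splitting form slightly cheaper on AVX.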

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
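
// Worked example: an i128 constant whose high 64-bit chunk is 0x100000000
// and whose low chunk is 42 costs 2 * TCC_Basic for the high chunk (it does
// not fit a sign-extended 32-bit immediate, so it needs a movabsq) plus
// TCC_Basic for the low chunk, i.e. 3 in total.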

int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
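
// For example, an "add i64 %x, 42" reports TCC_Free for the immediate: 42
// fits the instruction's sign-extended 32-bit immediate field, so there is
// nothing worth hoisting. The base constant of a GetElementPtr, by contrast,
// is deliberately priced at 2 * TCC_Basic above so that constant hoisting
// pulls it out of loops.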

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy, int Consecutive) {
  int DataWidth = DataTy->getPrimitiveSizeInBits();

  // TODO: AVX512 allows gather/scatter, which also works with strided and
  // random accesses.
  if ((DataWidth < 32) || (Consecutive == 0))
    return false;
  if (ST->hasAVX512() || ST->hasAVX2())
    return true;
  return false;
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType, int Consecutive) {
  return isLegalMaskedLoad(DataType, Consecutive);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}
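
// For example, a caller compiled with +avx2 may inline a callee compiled
// with +sse4.2 (the callee's feature set is a subset of the caller's), but
// not the reverse: inlining AVX2 code into an SSE-only function could let
// AVX instructions escape the target-feature check they were guarded by.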