//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;
#define DEBUG_TYPE "x86tti"

//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
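// Note on the conventions used throughout this file: each hook first
// legalizes the IR type via getTypeLegalizationCost, which yields the
// legalized MVT in LT.second and a split factor in LT.first; per-MVT table
// costs are then scaled by LT.first. The table costs are rough
// reciprocal-throughput estimates, not exact cycle counts.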
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}
unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}
unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead; that saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }
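  // For example (illustrative): for a legal v4i32 type, the expansion above
  // prices an sdiv by a power-of-two splat as
  //   2 * cost(ashr v4i32) + cost(lshr v4i32) + cost(add v4i32),
  // i.e. 2*1 + 1 + 1 = 4 on SSE2-class targets where each piece costs 1.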
  static const CostTblEntry AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX512CostTable[] = {
    { ISD::SHL, MVT::v16i32, 1 },
    { ISD::SRL, MVT::v16i32, 1 },
    { ISD::SRA, MVT::v16i32, 1 },
    { ISD::SHL, MVT::v8i64,  1 },
    { ISD::SRL, MVT::v8i64,  1 },
    { ISD::SRA, MVT::v8i64,  1 },
  };

  if (ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2, even though we mark them as
    // custom in order to detect the cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
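  // For example (illustrative): shl <16 x i16> %x, <i16 1, i16 2, ...> is
  // equivalent to a vpmullw by <i16 2, i16 4, ...>, since shifting left by
  // c multiplies by 2^c; one multiply is why the cost is just LT.first.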
  static const CostTblEntry XOPCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL, MVT::v16i8,  1 },
    { ISD::SRL, MVT::v16i8,  2 },
    { ISD::SRA, MVT::v16i8,  2 },
    { ISD::SHL, MVT::v8i16,  1 },
    { ISD::SRL, MVT::v8i16,  2 },
    { ISD::SRA, MVT::v8i16,  2 },
    { ISD::SHL, MVT::v4i32,  1 },
    { ISD::SRL, MVT::v4i32,  2 },
    { ISD::SRA, MVT::v4i32,  2 },
    { ISD::SHL, MVT::v2i64,  1 },
    { ISD::SRL, MVT::v2i64,  2 },
    { ISD::SRA, MVT::v2i64,  2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2 },
    { ISD::SRL, MVT::v32i8,  4 },
    { ISD::SRA, MVT::v32i8,  4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32,  2 },
    { ISD::SRL, MVT::v8i32,  4 },
    { ISD::SRA, MVT::v8i32,  4 },
    { ISD::SHL, MVT::v4i64,  2 },
    { ISD::SRL, MVT::v4i64,  4 },
    { ISD::SRA, MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,     24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,    10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,      4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,      4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
  static const CostTblEntry
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v32i8,  2 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v16i16, 2 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v8i32,  2 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
    { ISD::SHL,  MVT::v4i64,  2 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v32i8,  2 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v8i32,  2 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
    { ISD::SRL,  MVT::v4i64,  2 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v32i8,  8 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v16i16, 2 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v8i32,  2 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }
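  // Note on the "pmulhw/pmulhuw sequence" entries above: they model the
  // standard divide-by-constant trick of multiplying by a precomputed magic
  // number, keeping the high half, and applying shift fix-ups. For a uniform
  // divisor the magic constant is splatted once, which is why these entries
  // are far cheaper than the scalarized 20-per-lane division costs below.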
  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered into a
    // vector multiply (pmullw/pmulld).
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      return LT.first;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
        (ST->hasAVX() && !ST->hasAVX2()))
      ISD = ISD::MUL;

    // A v4i32 vector shift left by a non-uniform constant is converted into
    // a vector multiply; the new multiply is eventually lowered into a
    // sequence of shuffles and 2 x pmuludq.
    if (VT == MVT::v4i32 && ST->hasSSE2())
      ISD = ISD::MUL;
  }
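  // For example (illustrative): shl <4 x i32> %x, <i32 1, i32 2, i32 3,
  // i32 4> becomes mul <4 x i32> %x, <i32 2, i32 4, i32 8, i32 16>, so the
  // operation is charged as a v4i32 multiply from this point on.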
  static const CostTblEntry SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,   2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32, 2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,    26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v32i8,  2*26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,     4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,   2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,    54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v32i8,  2*54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,    32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v16i16, 2*32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,    16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,  2*16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,    12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,  2*12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }
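  // Worked example: a v4i32 sdiv is priced at 4*20 = 80 here, while four
  // scalar sdivs cost roughly 4 * TCC_Basic in the base model; the
  // vectorizer will therefore essentially never pick the vector form, which
  // is the intended effect of the per-lane "20 cycles" figure.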
  static const CostTblEntry AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL, MVT::v16i16, 4 },
    { ISD::MUL, MVT::v8i32,  4 },
    { ISD::SUB, MVT::v8i32,  4 },
    { ISD::ADD, MVT::v8i32,  4 },
    { ISD::SUB, MVT::v4i64,  4 },
    { ISD::ADD, MVT::v4i64,  4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL, MVT::v4i64, 18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, VT))
      return LT.first * Entry->Cost;
  }
  // Custom lowering of vectors.
  static const CostTblEntry CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL, MVT::v2i64, 9 },
    { ISD::MUL, MVT::v4i64, 9 },
  };
  if (const auto *Entry = CostTableLookup(CustomLowered, ISD, LT.second))
    return LT.first * Entry->Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }
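  // Reminder (illustrative): an alternate shuffle selects interleaved
  // elements from its two inputs, e.g. for 4 elements the mask is
  // <0, 5, 2, 7>, which is exactly the pattern that blend instructions
  // (blendps/vpblendw and friends) implement.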
  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //  2x vextractf128, 2x vpblendw, 1x vinsertf128.
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //  2x vextractf128, 4x vpshufb, 2x vpor, 1x vinsertf128.
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVXAltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSSE3 doesn't have 'blendps'. The following shuffles are expanded
      // into the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;
    static const CostTblEntry SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 x pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    if (const auto *Entry = CostTableLookup(SSEAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const TypeConversionCostTblEntry AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };
  static const TypeConversionCostTblEntry AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };
  static const TypeConversionCostTblEntry AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },
  };
  static const TypeConversionCostTblEntry SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,  8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };
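  // Note on the lookups below: the SSE2 and AVX512 tables are matched
  // against the legalized source/destination types, while the AVX2/AVX
  // tables are matched against the original simple value types, which is
  // why the two groups of lookups are phrased differently.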
  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConvTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }

  if (ST->hasAVX512()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX512ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64,  1 },
    { ISD::SETCC, MVT::v4f32,  1 },
    { ISD::SETCC, MVT::v2i64,  1 },
    { ISD::SETCC, MVT::v4i32,  1 },
    { ISD::SETCC, MVT::v8i16,  1 },
    { ISD::SETCC, MVT::v16i8,  1 },
  };

  static const CostTblEntry AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}
int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For scalars, take the regular (unmasked) cost.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  auto VT = TLI->getValueType(DL, SrcVTy);
  int Cost = 0;
  if (VT.isSimple() && LT.second != VT.getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}
int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}
int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.
  static const CostTblEntry SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}
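// Worked example for the APInt overload below (illustrative): an i128
// constant whose two 64-bit halves are both outside the int32 range costs
// 2 * TCC_Basic per half via the helper above, i.e. 4 in total; a half that
// is zero contributes nothing, which is why the helper may return zero.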
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  return (DataWidth >= 32 && ST->hasAVX2());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}
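// For example (illustrative): a masked load of <8 x i32> is legal here with
// AVX2 (element width 32, lowered with vpmaskmovd), while <16 x i8> is not,
// since there is no byte-granularity masked move before AVX-512BW.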
bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about the legality of the feature, the
  // vectorization factor is not calculated yet. The Loop Vectorizer sends a
  // scalar type and the decision is based on the width of the scalar element.
  // Later on, the cost model will estimate the usage of this intrinsic based
  // on the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = isa<PointerType>(ScalarTy) ?
    DL.getPointerSizeInBits() : ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return DataWidth >= 32 && ST->hasAVX512();
}

bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}
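// For example (illustrative): a caller compiled with +avx2 may inline a
// callee compiled with only +sse2, because the callee's feature bits are a
// subset of the caller's; the reverse direction is rejected, since inlining
// AVX2 code into an SSE2-only caller could execute illegal instructions.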