//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
///
//===----------------------------------------------------------------------===//
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  // instructions is inefficient. Once the problem is fixed, we should
  // call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}

unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}

unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}

unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle the loop, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}

int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
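    // For illustration, a 32-bit lane divided by 4 follows that shape
    // (a sketch of the idea, not lifted from the backend's lowering code):
    //   %sign = ashr <4 x i32> %x, <31,31,31,31>    ; SRA: broadcast sign bit
    //   %bias = lshr <4 x i32> %sign, <30,30,30,30> ; SRL: 0 or divisor-1
    //   %tmp  = add  <4 x i32> %x, %bias            ; ADD: bias negative lanes
    //   %res  = ashr <4 x i32> %tmp, <2,2,2,2>      ; SRA: divide by 4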
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL,  MVT::v16i32,  1 },
    { ISD::SRL,  MVT::v16i32,  1 },
    { ISD::SRA,  MVT::v16i32,  1 },
    { ISD::SHL,  MVT::v8i64,   1 },
    { ISD::SRL,  MVT::v8i64,   1 },
    { ISD::SRA,  MVT::v8i64,   1 },
  };

  if (ST->hasAVX512()) {
    if (const auto *Entry = CostTableLookup(AVX512CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 are legal on AVX2 even though we declare them as
    // custom, so that we can detect the cases where the shift amount is a
    // scalar.
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   1 },
    { ISD::SRA,  MVT::v4i32,   1 },
    { ISD::SHL,  MVT::v8i32,   1 },
    { ISD::SRL,  MVT::v8i32,   1 },
    { ISD::SRA,  MVT::v8i32,   1 },
    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   1 },
    { ISD::SHL,  MVT::v4i64,   1 },
    { ISD::SRL,  MVT::v4i64,   1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
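      // For illustration: shifting each lane left by a constant is the same
      // as multiplying it by the matching power of two, e.g.
      //   x << <1, 2, 3, ...>  ==  x * <2, 4, 8, ...>
      // which maps onto a single vpmullw.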
      return LT.first;

    if (const auto *Entry = CostTableLookup(AVX2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> XOPCostTable[] = {
    // 128bit shifts take 1cy, but right shifts require negation beforehand.
    { ISD::SHL,  MVT::v16i8,   1 },
    { ISD::SRL,  MVT::v16i8,   2 },
    { ISD::SRA,  MVT::v16i8,   2 },
    { ISD::SHL,  MVT::v8i16,   1 },
    { ISD::SRL,  MVT::v8i16,   2 },
    { ISD::SRA,  MVT::v8i16,   2 },
    { ISD::SHL,  MVT::v4i32,   1 },
    { ISD::SRL,  MVT::v4i32,   2 },
    { ISD::SRA,  MVT::v4i32,   2 },
    { ISD::SHL,  MVT::v2i64,   1 },
    { ISD::SRL,  MVT::v2i64,   2 },
    { ISD::SRA,  MVT::v2i64,   2 },
    // 256bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL,  MVT::v32i8,   2 },
    { ISD::SRL,  MVT::v32i8,   4 },
    { ISD::SRA,  MVT::v32i8,   4 },
    { ISD::SHL,  MVT::v16i16,  2 },
    { ISD::SRL,  MVT::v16i16,  4 },
    { ISD::SRA,  MVT::v16i16,  4 },
    { ISD::SHL,  MVT::v8i32,   2 },
    { ISD::SRL,  MVT::v8i32,   4 },
    { ISD::SRA,  MVT::v8i32,   4 },
    { ISD::SHL,  MVT::v4i64,   2 },
    { ISD::SRL,  MVT::v4i64,   4 },
    { ISD::SRA,  MVT::v4i64,   4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    if (const auto *Entry = CostTableLookup(XOPCostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,      11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,      11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,     10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,      24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,     10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,       4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,       4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,   32*20 },
    { ISD::SDIV, MVT::v16i16,  16*20 },
    { ISD::SDIV, MVT::v8i32,    8*20 },
    { ISD::SDIV, MVT::v4i64,    4*20 },
    { ISD::UDIV, MVT::v32i8,   32*20 },
    { ISD::UDIV, MVT::v16i16,  16*20 },
    { ISD::UDIV, MVT::v8i32,    8*20 },
    { ISD::UDIV, MVT::v4i64,    4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    if (const auto *Entry = CostTableLookup(AVX2CustomCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,   1 }, // psllw.
    { ISD::SHL,  MVT::v32i8,   2 }, // psllw.
    { ISD::SHL,  MVT::v8i16,   1 }, // psllw.
    { ISD::SHL,  MVT::v16i16,  2 }, // psllw.
    { ISD::SHL,  MVT::v4i32,   1 }, // pslld
    { ISD::SHL,  MVT::v8i32,   2 }, // pslld
    { ISD::SHL,  MVT::v2i64,   1 }, // psllq.
    { ISD::SHL,  MVT::v4i64,   2 }, // psllq.

    { ISD::SRL,  MVT::v16i8,   1 }, // psrlw.
    { ISD::SRL,  MVT::v32i8,   2 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,   1 }, // psrlw.
    { ISD::SRL,  MVT::v16i16,  2 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,   1 }, // psrld.
    { ISD::SRL,  MVT::v8i32,   2 }, // psrld.
    { ISD::SRL,  MVT::v2i64,   1 }, // psrlq.
    { ISD::SRL,  MVT::v4i64,   2 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,   4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v32i8,   8 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,   1 }, // psraw.
    { ISD::SRA,  MVT::v16i16,  2 }, // psraw.
    { ISD::SRA,  MVT::v4i32,   1 }, // psrad.
    { ISD::SRA,  MVT::v8i32,   2 }, // psrad.
    { ISD::SRA,  MVT::v2i64,   4 }, // 2 x psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,   8 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,   6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,   6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32,  19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32,  15 }, // pmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    if (const auto *Entry = CostTableLookup(SSE2UniformConstCostTable, ISD,
                                            LT.second))
      return LT.first * Entry->Cost;
  }

  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // Vector shift left by non-uniform constant can be lowered
    // into vector multiply (pmullw/pmulld).
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      return LT.first;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
        (ST->hasAVX() && !ST->hasAVX2()))
      ISD = ISD::MUL;

    // A vector shift left by non-uniform constant is converted
    // into a vector multiply; the new multiply is eventually
    // lowered into a sequence of shuffles and 2 x pmuludq.
    if (VT == MVT::v4i32 && ST->hasSSE2())
      ISD = ISD::MUL;
  }

  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v32i8,   2*26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,    2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32,  2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,    2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v32i8,   2*26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,   2*16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,    2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,     54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v32i8,   2*54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,   2*16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,     12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2*12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways, so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8,  16*20 },
    { ISD::SDIV, MVT::v8i16,   8*20 },
    { ISD::SDIV, MVT::v4i32,   4*20 },
    { ISD::SDIV, MVT::v2i64,   2*20 },
    { ISD::UDIV, MVT::v16i8,  16*20 },
    { ISD::UDIV, MVT::v8i16,   8*20 },
    { ISD::UDIV, MVT::v4i32,   4*20 },
    { ISD::UDIV, MVT::v2i64,   2*20 },
  };

  if (ST->hasSSE2()) {
    if (const auto *Entry = CostTableLookup(SSE2CostTable, ISD, LT.second))
      return LT.first * Entry->Cost;
  }

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies(3), shifts(4) and adds(2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64,  18 },
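    // As an illustrative check of the entry above: 2 halves x (3 multiplies +
    // 4 shifts + 2 adds) = 2 x 9 = 18.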
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    if (const auto *Entry = CostTableLookup(AVX1CostTable, ISD, VT))
      return LT.first * Entry->Cost;
  }

  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies(3), shifts(4) and adds(2).
    { ISD::MUL,  MVT::v2i64,  9 },
    { ISD::MUL,  MVT::v4i64,  9 },
  };
  if (const auto *Entry = CostTableLookup(CustomLowered, ISD, LT.second))
    return LT.first * Entry->Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}

int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }

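  // For illustration (reading the code above): reversing a v8i32 that
  // legalizes to a single 256-bit register costs 3, while on a 128-bit-only
  // subtarget the same type splits into two v4i32 parts and costs 1 * 2 = 2.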
  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //   2x vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //   2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVXAltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41())
      if (const auto *Entry = CostTableLookup(SSE41AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3())
      if (const auto *Entry = CostTableLookup(SSSE3AltShuffleTbl,
                                              ISD::VECTOR_SHUFFLE, LT.second))
        return LT.first * Entry->Cost;

    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    if (const auto *Entry = CostTableLookup(SSEAltShuffleTbl,
                                            ISD::VECTOR_SHUFFLE, LT.second))
      return LT.first * Entry->Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}

int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,    MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,    MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,    MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32,  3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64,  3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  8 },
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,   7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,   6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,   4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32,  4 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8,  MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   8 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::SINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  1 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  1 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i1,   6 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i8,   5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i16,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32,  9 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32,  MVT::v4i32,  6 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i1,   7 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i8,   2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i16,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i32,  6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Workaround this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64,  MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64,  MVT::v4i64,  4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,   MVT::v8f32,  7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,   MVT::v4f32,  1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32,  MVT::v8f32,  8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32,  MVT::v4f64,  4*4 },
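    // As an illustrative reading of the two FP_TO_UINT entries above: 8
    // (resp. 4) lanes at an inflated 4 units per lane gives 8*4 = 32 and
    // 4*4 = 16.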
  };

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64,  2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32,  4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16,  8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32,  8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8,  8 },
  };

  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(SSE2ConvTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return LTSrc.first * Entry->Cost;
  }

  if (ST->hasAVX512()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX512ConversionTbl, ISD,
                                                   LTDest.second, LTSrc.second))
      return Entry->Cost;
  }

  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasAVX2()) {
    if (const auto *Entry = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  if (ST->hasAVX()) {
    if (const auto *Entry = ConvertCostTableLookup(AVXConversionTbl, ISD,
                                                   DstTy.getSimpleVT(),
                                                   SrcTy.getSimpleVT()))
      return Entry->Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}

int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC,  MVT::v2f64,   1 },
    { ISD::SETCC,  MVT::v4f32,   1 },
    { ISD::SETCC,  MVT::v2i64,   1 },
    { ISD::SETCC,  MVT::v4i32,   1 },
    { ISD::SETCC,  MVT::v8i16,   1 },
    { ISD::SETCC,  MVT::v16i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC,  MVT::v4f64,   1 },
    { ISD::SETCC,  MVT::v8f32,   1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC,  MVT::v4i64,   4 },
    { ISD::SETCC,  MVT::v8i32,   4 },
    { ISD::SETCC,  MVT::v16i16,  4 },
    { ISD::SETCC,  MVT::v32i8,   4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC,  MVT::v4i64,   1 },
    { ISD::SETCC,  MVT::v8i32,   1 },
    { ISD::SETCC,  MVT::v16i16,  1 },
    { ISD::SETCC,  MVT::v32i8,   1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC,  MVT::v8i64,   1 },
    { ISD::SETCC,  MVT::v16i32,  1 },
    { ISD::SETCC,  MVT::v8f64,   1 },
    { ISD::SETCC,  MVT::v16f32,  1 },
  };

  if (ST->hasAVX512())
    if (const auto *Entry = CostTableLookup(AVX512CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX2())
    if (const auto *Entry = CostTableLookup(AVX2CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasAVX())
    if (const auto *Entry = CostTableLookup(AVX1CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  if (ST->hasSSE42())
    if (const auto *Entry = CostTableLookup(SSE42CostTbl, ISD, MTy))
      return LT.first * Entry->Cost;

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}

int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;

    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}

int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}

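// For illustration: scalarizing a <4 x float> with both Insert and Extract
// set sums the per-lane insertelement and extractelement costs returned by
// getVectorInstrCost above; note that lane 0 of a floating point vector is
// reported as free there.
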
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // On Sandybridge 256bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}

int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // To calculate the scalar cost, take the regular cost without the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);

    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

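  // For illustration, the scalarized path above charges: NumElem scalar
  // loads/stores (MemopCost), plus extract/insert of each data lane
  // (ValueSplitCost), plus extract of each mask lane (MaskSplitCost), plus
  // NumElem * (icmp + branch) for the per-lane guards (MaskCmpCost).
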
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int Cost = 0;
  if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires expand/truncate for data and a shuffle for mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }
  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}

int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses will
  // likely result in more instructions compared to scalar code where the
  // computation can more often be merged into the index mode. The resulting
  // extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}

int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {

  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD,  MVT::v2f64,  2 },
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::ADD,   MVT::v2i64,  2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v8i16,  5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::FADD,  MVT::v4f64,  5 },
    { ISD::FADD,  MVT::v8f32,  7 },
    { ISD::ADD,   MVT::v2i64,  1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,   MVT::v4i64,  5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,   MVT::v8i16,  5 },
    { ISD::ADD,   MVT::v8i32,  5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v2f64,  2 },
    { ISD::FADD,  MVT::v4f32,  4 },
    { ISD::ADD,   MVT::v2i64,  2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,   MVT::v8i16,  4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD,  MVT::v4f32,  3 },
    { ISD::FADD,  MVT::v4f64,  3 },
    { ISD::FADD,  MVT::v8f32,  4 },
    { ISD::ADD,   MVT::v2i64,  1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,   MVT::v4i32,  3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,   MVT::v4i64,  3 },
    { ISD::ADD,   MVT::v8i16,  4 },
    { ISD::ADD,   MVT::v8i32,  5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  } else {
    if (ST->hasAVX())
      if (const auto *Entry = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;

    if (ST->hasSSE42())
      if (const auto *Entry = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy))
        return LT.first * Entry->Cost;
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}

/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}

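// For illustration of the tiers above: materializing 0 is free (it can
// typically be produced without an immediate at all), a value that fits in a
// sign-extended 32-bit immediate such as -1 or 0x7fffffff costs TCC_Basic,
// and a value such as 0x100000000 needs a full 64-bit immediate move (e.g.
// movabsq) and is charged 2 * TCC_Basic.
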
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128bit, because this might lead to
  // incorrect code generation or assertions in codegen.
  // Fixme: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64-bit.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);

  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}

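// For illustration, an i128 constant is split by the loop above into two
// 64-bit chunks: a small value such as 5 costs TCC_Basic for the low chunk
// and nothing for the all-zero high chunk, while a constant whose two halves
// both need 64-bit moves is charged 2 * (2 * TCC_Basic).
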
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
  // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}

bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  // TODO: Pointers should also be legal,
  // but it requires additional support in composing intrinsics name.
  // getPrimitiveSizeInBits() returns 0 for PointerType.
  int DataWidth = ScalarTy->getPrimitiveSizeInBits();

  return (DataWidth >= 32 && ST->hasAVX2());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}

bool X86TTIImpl::isLegalMaskedGather(Type *DataTy) {
  // This function is currently called in two cases: from the Loop Vectorizer
  // and from the Scalarizer.
  // When the Loop Vectorizer asks about legality of the feature,
  // the vectorization factor is not calculated yet. The Loop Vectorizer
  // sends a scalar type and the decision is based on the width of the
  // scalar element.
  // Later on, the cost model will estimate usage of this intrinsic based on
  // the vector type.
  // The Scalarizer asks again about legality. It sends a vector type.
  // In this case we can reject non-power-of-2 vectors.
  if (isa<VectorType>(DataTy) && !isPowerOf2_32(DataTy->getVectorNumElements()))
    return false;
  Type *ScalarTy = DataTy->getScalarType();
  // TODO: Pointers should also be legal,
  // but it requires additional support in composing intrinsics name.
  // getPrimitiveSizeInBits() returns 0 for PointerType.
  int DataWidth = ScalarTy->getPrimitiveSizeInBits();

  // AVX-512 allows gather and scatter.
  return DataWidth >= 32 && ST->hasAVX512();
}

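// For illustration of the checks above: with AVX-512, a scalar i32 or i64
// query (width >= 32) is reported legal, while a <3 x i32> vector query is
// rejected because 3 is not a power of two.
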
bool X86TTIImpl::isLegalMaskedScatter(Type *DataType) {
  return isLegalMaskedGather(DataType);
}

bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}
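// For illustration of the subset test above: a caller built with, say,
// "+avx2,+sse4.2" may inline a callee that only requires "+sse4.2", but not
// the reverse, since the callee's feature bits would not all be present in
// the caller.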