//===-- X86TargetTransformInfo.cpp - X86 specific TTI pass ----------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// This file implements a TargetTransformInfo analysis pass specific to the
/// X86 target machine. It uses the target's detailed information to provide
/// more precise answers to certain TTI queries, while letting the target
/// independent and default TTI implementations handle the rest.
//
//===----------------------------------------------------------------------===//
#include "X86TargetTransformInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/CodeGen/BasicTTIImpl.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Debug.h"
#include "llvm/Target/CostTable.h"
#include "llvm/Target/TargetLowering.h"

using namespace llvm;

#define DEBUG_TYPE "x86tti"
//===----------------------------------------------------------------------===//
//
// X86 cost model.
//
//===----------------------------------------------------------------------===//
TargetTransformInfo::PopcntSupportKind
X86TTIImpl::getPopcntSupport(unsigned TyWidth) {
  assert(isPowerOf2_32(TyWidth) && "Ty width must be power of 2");
  // TODO: Currently the __builtin_popcount() implementation using SSE3
  //   instructions is inefficient. Once the problem is fixed, we should
  //   call ST->hasSSE3() instead of ST->hasPOPCNT().
  return ST->hasPOPCNT() ? TTI::PSK_FastHardware : TTI::PSK_Software;
}
unsigned X86TTIImpl::getNumberOfRegisters(bool Vector) {
  if (Vector && !ST->hasSSE1())
    return 0;

  if (ST->is64Bit()) {
    if (Vector && ST->hasAVX512())
      return 32;
    return 16;
  }
  return 8;
}
unsigned X86TTIImpl::getRegisterBitWidth(bool Vector) {
  if (Vector) {
    if (ST->hasAVX512()) return 512;
    if (ST->hasAVX()) return 256;
    if (ST->hasSSE1()) return 128;
    return 0;
  }

  if (ST->is64Bit())
    return 64;
  return 32;
}
unsigned X86TTIImpl::getMaxInterleaveFactor(unsigned VF) {
  // If the loop will not be vectorized, don't interleave the loop.
  // Let the regular unroller handle it instead, which saves the overflow
  // check and memory check cost.
  if (VF == 1)
    return 1;

  if (ST->isAtom())
    return 1;

  // Sandybridge and Haswell have multiple execution ports and pipelined
  // vector units.
  if (ST->hasAVX())
    return 4;

  return 2;
}
int X86TTIImpl::getArithmeticInstrCost(
    unsigned Opcode, Type *Ty, TTI::OperandValueKind Op1Info,
    TTI::OperandValueKind Op2Info, TTI::OperandValueProperties Opd1PropInfo,
    TTI::OperandValueProperties Opd2PropInfo) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Ty);

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
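
  // Note: LT.first is the number of legal-typed parts the operation splits
  // into and LT.second is the legalized type; the per-entry costs in the
  // tables below are scaled by LT.first.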
  if (ISD == ISD::SDIV &&
      Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      Opd2PropInfo == TargetTransformInfo::OP_PowerOf2) {
    // On X86, vector signed division by a uniform power-of-two constant is
    // normally expanded to the sequence SRA + SRL + ADD + SRA.
    // The OperandValue properties may not be the same as those of the
    // previous operation; conservatively assume OP_None.
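    // For example, dividing an i32 lane by 8 computes:
    //   T = X >>s 31; T = T >>u 29; X = X + T; X = X >>s 3
    // i.e. a bias is added so negative dividends round toward zero before the
    // final arithmetic shift.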
    int Cost = 2 * getArithmeticInstrCost(Instruction::AShr, Ty, Op1Info,
                                          Op2Info, TargetTransformInfo::OP_None,
                                          TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::LShr, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);
    Cost += getArithmeticInstrCost(Instruction::Add, Ty, Op1Info, Op2Info,
                                   TargetTransformInfo::OP_None,
                                   TargetTransformInfo::OP_None);

    return Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType>
  AVX2UniformConstCostTable[] = {
    { ISD::SRA,  MVT::v4i64,   4 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v16i16,  6 }, // vpmulhw sequence
    { ISD::UDIV, MVT::v16i16,  6 }, // vpmulhuw sequence
    { ISD::SDIV, MVT::v8i32,  15 }, // vpmuldq sequence
    { ISD::UDIV, MVT::v8i32,  15 }, // vpmuludq sequence
  };

  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2UniformConstCostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTable[] = {
    { ISD::SHL, MVT::v16i32, 1 },
    { ISD::SRL, MVT::v16i32, 1 },
    { ISD::SRA, MVT::v16i32, 1 },
    { ISD::SHL, MVT::v8i64,  1 },
    { ISD::SRL, MVT::v8i64,  1 },
    { ISD::SRA, MVT::v8i64,  1 },
  };

  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX512CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTable[] = {
    // Shifts on v4i64/v8i32 on AVX2 are legal even though we declare them
    // custom so we can detect the cases where the shift amount is a scalar.
    { ISD::SHL, MVT::v4i32, 1 },
    { ISD::SRL, MVT::v4i32, 1 },
    { ISD::SRA, MVT::v4i32, 1 },
    { ISD::SHL, MVT::v8i32, 1 },
    { ISD::SRL, MVT::v8i32, 1 },
    { ISD::SRA, MVT::v8i32, 1 },
    { ISD::SHL, MVT::v2i64, 1 },
    { ISD::SRL, MVT::v2i64, 1 },
    { ISD::SHL, MVT::v4i64, 1 },
    { ISD::SRL, MVT::v4i64, 1 },
  };

  // Look for AVX2 lowering tricks.
  if (ST->hasAVX2()) {
    if (ISD == ISD::SHL && LT.second == MVT::v16i16 &&
        (Op2Info == TargetTransformInfo::OK_UniformConstantValue ||
         Op2Info == TargetTransformInfo::OK_NonUniformConstantValue))
      // On AVX2, a packed v16i16 shift left by a constant build_vector
      // is lowered into a vector multiply (vpmullw).
      return getArithmeticInstrCost(Instruction::Mul, Ty, Op1Info, Op2Info);

    int Idx = CostTableLookup(AVX2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> XOPCostTable[] = {
    // 128-bit shifts take 1 cycle, but right shifts require negation
    // beforehand.
    { ISD::SHL, MVT::v16i8,  1 },
    { ISD::SRL, MVT::v16i8,  2 },
    { ISD::SRA, MVT::v16i8,  2 },
    { ISD::SHL, MVT::v8i16,  1 },
    { ISD::SRL, MVT::v8i16,  2 },
    { ISD::SRA, MVT::v8i16,  2 },
    { ISD::SHL, MVT::v4i32,  1 },
    { ISD::SRL, MVT::v4i32,  2 },
    { ISD::SRA, MVT::v4i32,  2 },
    { ISD::SHL, MVT::v2i64,  1 },
    { ISD::SRL, MVT::v2i64,  2 },
    { ISD::SRA, MVT::v2i64,  2 },
    // 256-bit shifts require splitting if AVX2 didn't catch them above.
    { ISD::SHL, MVT::v32i8,  2 },
    { ISD::SRL, MVT::v32i8,  4 },
    { ISD::SRA, MVT::v32i8,  4 },
    { ISD::SHL, MVT::v16i16, 2 },
    { ISD::SRL, MVT::v16i16, 4 },
    { ISD::SRA, MVT::v16i16, 4 },
    { ISD::SHL, MVT::v8i32,  2 },
    { ISD::SRL, MVT::v8i32,  4 },
    { ISD::SRA, MVT::v8i32,  4 },
    { ISD::SHL, MVT::v4i64,  2 },
    { ISD::SRL, MVT::v4i64,  4 },
    { ISD::SRA, MVT::v4i64,  4 },
  };

  // Look for XOP lowering tricks.
  if (ST->hasXOP()) {
    int Idx = CostTableLookup(XOPCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * XOPCostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX2CustomCostTable[] = {
    { ISD::SHL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SHL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRL,  MVT::v32i8,     11 }, // vpblendvb sequence.
    { ISD::SRL,  MVT::v16i16,    10 }, // extend/vpsrlvd/pack sequence.

    { ISD::SRA,  MVT::v32i8,     24 }, // vpblendvb sequence.
    { ISD::SRA,  MVT::v16i16,    10 }, // extend/vpsravd/pack sequence.
    { ISD::SRA,  MVT::v2i64,      4 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,      4 }, // srl/xor/sub sequence.

    // Vectorizing division is a bad idea. See the SSE2 table for more comments.
    { ISD::SDIV, MVT::v32i8,  32*20 },
    { ISD::SDIV, MVT::v16i16, 16*20 },
    { ISD::SDIV, MVT::v8i32,   8*20 },
    { ISD::SDIV, MVT::v4i64,   4*20 },
    { ISD::UDIV, MVT::v32i8,  32*20 },
    { ISD::UDIV, MVT::v16i16, 16*20 },
    { ISD::UDIV, MVT::v8i32,   8*20 },
    { ISD::UDIV, MVT::v4i64,   4*20 },
  };

  // Look for AVX2 lowering tricks for custom cases.
  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CustomCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * AVX2CustomCostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType>
  SSE2UniformConstCostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // Constant splats are cheaper for the following instructions.
    { ISD::SHL,  MVT::v16i8,  1 }, // psllw.
    { ISD::SHL,  MVT::v32i8,  2 }, // psllw.
    { ISD::SHL,  MVT::v8i16,  1 }, // psllw.
    { ISD::SHL,  MVT::v16i16, 2 }, // psllw.
    { ISD::SHL,  MVT::v4i32,  1 }, // pslld.
    { ISD::SHL,  MVT::v8i32,  2 }, // pslld.
    { ISD::SHL,  MVT::v2i64,  1 }, // psllq.
    { ISD::SHL,  MVT::v4i64,  2 }, // psllq.

    { ISD::SRL,  MVT::v16i8,  1 }, // psrlw.
    { ISD::SRL,  MVT::v32i8,  2 }, // psrlw.
    { ISD::SRL,  MVT::v8i16,  1 }, // psrlw.
    { ISD::SRL,  MVT::v16i16, 2 }, // psrlw.
    { ISD::SRL,  MVT::v4i32,  1 }, // psrld.
    { ISD::SRL,  MVT::v8i32,  2 }, // psrld.
    { ISD::SRL,  MVT::v2i64,  1 }, // psrlq.
    { ISD::SRL,  MVT::v4i64,  2 }, // psrlq.

    { ISD::SRA,  MVT::v16i8,  4 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v32i8,  8 }, // psrlw, pand, pxor, psubb.
    { ISD::SRA,  MVT::v8i16,  1 }, // psraw.
    { ISD::SRA,  MVT::v16i16, 2 }, // psraw.
    { ISD::SRA,  MVT::v4i32,  1 }, // psrad.
    { ISD::SRA,  MVT::v8i32,  2 }, // psrad.
    { ISD::SRA,  MVT::v2i64,  4 }, // 2 x psrad + shuffle.
    { ISD::SRA,  MVT::v4i64,  8 }, // 2 x psrad + shuffle.

    { ISD::SDIV, MVT::v8i16,  6 }, // pmulhw sequence
    { ISD::UDIV, MVT::v8i16,  6 }, // pmulhuw sequence
    { ISD::SDIV, MVT::v4i32, 19 }, // pmuludq sequence
    { ISD::UDIV, MVT::v4i32, 15 }, // pmuludq sequence
  };
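
  // The pmulh/pmuludq division sequences above implement division by a
  // uniform constant as a multiply by a precomputed "magic" reciprocal
  // followed by shifts and fixups, which keeps the operation vectorizable.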
  if (Op2Info == TargetTransformInfo::OK_UniformConstantValue &&
      ST->hasSSE2()) {
    // pmuldq sequence.
    if (ISD == ISD::SDIV && LT.second == MVT::v4i32 && ST->hasSSE41())
      return LT.first * 15;

    int Idx = CostTableLookup(SSE2UniformConstCostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2UniformConstCostTable[Idx].Cost;
  }
  if (ISD == ISD::SHL &&
      Op2Info == TargetTransformInfo::OK_NonUniformConstantValue) {
    MVT VT = LT.second;
    // A vector shift left by a non-uniform constant can be lowered
    // into a vector multiply (pmullw/pmulld).
    if ((VT == MVT::v8i16 && ST->hasSSE2()) ||
        (VT == MVT::v4i32 && ST->hasSSE41()))
      return LT.first;

    // v16i16 and v8i32 shifts by non-uniform constants are lowered into a
    // sequence of extract + two vector multiply + insert.
    if ((VT == MVT::v8i32 || VT == MVT::v16i16) &&
        (ST->hasAVX() && !ST->hasAVX2()))
      ISD = ISD::MUL;

    // A vector shift left by a non-uniform constant is converted
    // into a vector multiply; the new multiply is eventually
    // lowered into a sequence of shuffles and 2 x pmuludq.
    if (VT == MVT::v4i32 && ST->hasSSE2())
      ISD = ISD::MUL;
  }
  static const CostTblEntry<MVT::SimpleValueType> SSE2CostTable[] = {
    // We don't correctly identify costs of casts because they are marked as
    // custom.
    // For some cases, where the shift amount is a scalar, we would be able
    // to generate better code. Unfortunately, when this is the case the value
    // (the splat) will get hoisted out of the loop, thereby making it invisible
    // to ISel. The cost model must return worst case assumptions because it is
    // used for vectorization and we don't want to make vectorized code worse
    // than scalar code.
    { ISD::SHL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v32i8,   2*26 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SHL,  MVT::v4i32,    2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v8i32,  2*2*5 }, // We optimized this using mul.
    { ISD::SHL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SHL,  MVT::v4i64,    2*4 }, // splat+shuffle sequence.

    { ISD::SRL,  MVT::v16i8,     26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v32i8,   2*26 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SRL,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v8i32,   2*16 }, // Shift each lane + blend.
    { ISD::SRL,  MVT::v2i64,      4 }, // splat+shuffle sequence.
    { ISD::SRL,  MVT::v4i64,    2*4 }, // splat+shuffle sequence.

    { ISD::SRA,  MVT::v16i8,     54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v32i8,   2*54 }, // unpacked cmpgtb sequence.
    { ISD::SRA,  MVT::v8i16,     32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v16i16,  2*32 }, // cmpgtb sequence.
    { ISD::SRA,  MVT::v4i32,     16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v8i32,   2*16 }, // Shift each lane + blend.
    { ISD::SRA,  MVT::v2i64,     12 }, // srl/xor/sub sequence.
    { ISD::SRA,  MVT::v4i64,   2*12 }, // srl/xor/sub sequence.

    // It is not a good idea to vectorize division. We have to scalarize it and
    // in the process we will often end up having to spill regular
    // registers. The overhead of division is going to dominate most kernels
    // anyways so try hard to prevent vectorization of division - it is
    // generally a bad idea. Assume somewhat arbitrarily that we have to be able
    // to hide "20 cycles" for each lane.
    { ISD::SDIV, MVT::v16i8, 16*20 },
    { ISD::SDIV, MVT::v8i16,  8*20 },
    { ISD::SDIV, MVT::v4i32,  4*20 },
    { ISD::SDIV, MVT::v2i64,  2*20 },
    { ISD::UDIV, MVT::v16i8, 16*20 },
    { ISD::UDIV, MVT::v8i16,  8*20 },
    { ISD::UDIV, MVT::v4i32,  4*20 },
    { ISD::UDIV, MVT::v2i64,  2*20 },
  };

  if (ST->hasSSE2()) {
    int Idx = CostTableLookup(SSE2CostTable, ISD, LT.second);
    if (Idx != -1)
      return LT.first * SSE2CostTable[Idx].Cost;
  }
  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTable[] = {
    // We don't have to scalarize unsupported ops. We can issue two half-sized
    // operations and we only need to extract the upper YMM half.
    // Two ops + 1 extract + 1 insert = 4.
    { ISD::MUL,  MVT::v16i16,  4 },
    { ISD::MUL,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v8i32,   4 },
    { ISD::ADD,  MVT::v8i32,   4 },
    { ISD::SUB,  MVT::v4i64,   4 },
    { ISD::ADD,  MVT::v4i64,   4 },
    // A v4i64 multiply is custom lowered as two split v2i64 vectors that then
    // are lowered as a series of long multiplies (3), shifts (4) and adds (2).
    // Because we believe v4i64 to be a legal type, we must also include the
    // split factor of two in the cost table. Therefore, the cost here is 18
    // instead of 9.
    { ISD::MUL,  MVT::v4i64, 18 },
  };

  // Look for AVX1 lowering tricks.
  if (ST->hasAVX() && !ST->hasAVX2()) {
    MVT VT = LT.second;

    int Idx = CostTableLookup(AVX1CostTable, ISD, VT);
    if (Idx != -1)
      return LT.first * AVX1CostTable[Idx].Cost;
  }
  // Custom lowering of vectors.
  static const CostTblEntry<MVT::SimpleValueType> CustomLowered[] = {
    // A v2i64/v4i64 multiply is custom lowered as a series of long
    // multiplies (3), shifts (4) and adds (2).
    { ISD::MUL,  MVT::v2i64, 9 },
    { ISD::MUL,  MVT::v4i64, 9 },
  };
  int Idx = CostTableLookup(CustomLowered, ISD, LT.second);
  if (Idx != -1)
    return LT.first * CustomLowered[Idx].Cost;

  // Special lowering of v4i32 mul on sse2, sse3: Lower v4i32 mul as 2x shuffle,
  // 2x pmuludq, 2x shuffle.
  if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&
      !ST->hasSSE41())
    return LT.first * 6;

  // Fallback to the default implementation.
  return BaseT::getArithmeticInstrCost(Opcode, Ty, Op1Info, Op2Info);
}
int X86TTIImpl::getShuffleCost(TTI::ShuffleKind Kind, Type *Tp, int Index,
                               Type *SubTp) {
  // We only estimate the cost of reverse and alternate shuffles.
  if (Kind != TTI::SK_Reverse && Kind != TTI::SK_Alternate)
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);

  if (Kind == TTI::SK_Reverse) {
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);
    int Cost = 1;
    if (LT.second.getSizeInBits() > 128)
      Cost = 3; // Extract + insert + copy.

    // Multiply by the number of parts.
    return Cost * LT.first;
  }
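
  // An alternate shuffle interleaves elements from its two inputs in an
  // even/odd pattern, e.g. mask <0, 5, 2, 7> for four-element vectors, which
  // blend-style instructions can often implement directly.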
  if (Kind == TTI::SK_Alternate) {
    // 64-bit packed float vectors (v2f32) are widened to type v4f32.
    // 64-bit packed integer vectors (v2i32) are promoted to type v2i64.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Tp);

    // The backend knows how to generate a single VEX.256 version of
    // instruction VPBLENDW if the target supports AVX2.
    if (ST->hasAVX2() && LT.second == MVT::v16i16)
      return LT.first;

    static const CostTblEntry<MVT::SimpleValueType> AVXAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v4i64, 1},  // vblendpd
      {ISD::VECTOR_SHUFFLE, MVT::v4f64, 1},  // vblendpd

      {ISD::VECTOR_SHUFFLE, MVT::v8i32, 1},  // vblendps
      {ISD::VECTOR_SHUFFLE, MVT::v8f32, 1},  // vblendps

      // This shuffle is custom lowered into a sequence of:
      //  2x vextractf128 , 2x vpblendw , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v16i16, 5},

      // This shuffle is custom lowered into a long sequence of:
      //  2x vextractf128 , 4x vpshufb , 2x vpor , 1x vinsertf128
      {ISD::VECTOR_SHUFFLE, MVT::v32i8, 9}
    };

    if (ST->hasAVX()) {
      int Idx =
          CostTableLookup(AVXAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * AVXAltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSE41AltShuffleTbl[] = {
      // These are lowered into movsd.
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},

      // Packed float vectors with four elements are lowered into BLENDI dag
      // nodes. A v4i32/v4f32 BLENDI generates a single 'blendps'/'blendpd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 1},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 1},

      // This shuffle generates a single pshufw.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 1},

      // There is no instruction that matches a v16i8 alternate shuffle.
      // The backend will expand it into the sequence 'pshufb + pshufb + or'.
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}
    };

    if (ST->hasSSE41()) {
      int Idx =
          CostTableLookup(SSE41AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSE41AltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSSE3AltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      // SSE3 doesn't have 'blendps'. The following shuffles are expanded into
      // the sequence 'shufps + pshufd'.
      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},

      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 3},  // pshufb + pshufb + or
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 3}   // pshufb + pshufb + or
    };

    if (ST->hasSSSE3()) {
      int Idx =
          CostTableLookup(SSSE3AltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
      if (Idx != -1)
        return LT.first * SSSE3AltShuffleTbl[Idx].Cost;
    }
    static const CostTblEntry<MVT::SimpleValueType> SSEAltShuffleTbl[] = {
      {ISD::VECTOR_SHUFFLE, MVT::v2i64, 1},  // movsd
      {ISD::VECTOR_SHUFFLE, MVT::v2f64, 1},  // movsd

      {ISD::VECTOR_SHUFFLE, MVT::v4i32, 2},  // shufps + pshufd
      {ISD::VECTOR_SHUFFLE, MVT::v4f32, 2},  // shufps + pshufd

      // This is expanded into a long sequence of four extract + four insert.
      {ISD::VECTOR_SHUFFLE, MVT::v8i16, 8},  // 4 x pextrw + 4 pinsrw.

      // 8 x (pinsrw + pextrw + and + movb + movzb + or)
      {ISD::VECTOR_SHUFFLE, MVT::v16i8, 48}
    };

    // Fall-back (SSE3 and SSE2).
    int Idx = CostTableLookup(SSEAltShuffleTbl, ISD::VECTOR_SHUFFLE, LT.second);
    if (Idx != -1)
      return LT.first * SSEAltShuffleTbl[Idx].Cost;
    return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
  }

  return BaseT::getShuffleCost(Kind, Tp, Index, SubTp);
}
int X86TTIImpl::getCastInstrCost(unsigned Opcode, Type *Dst, Type *Src) {
  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX512ConversionTbl[] = {
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v8f32,  1 },
    { ISD::FP_EXTEND, MVT::v8f64,  MVT::v16f32, 3 },
    { ISD::FP_ROUND,  MVT::v8f32,  MVT::v8f64,  1 },
    { ISD::FP_ROUND,  MVT::v16f32, MVT::v8f64,  3 },

    { ISD::TRUNCATE,  MVT::v16i8,  MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v16i16, MVT::v16i32, 1 },
    { ISD::TRUNCATE,  MVT::v8i16,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v8i32,  MVT::v8i64,  1 },
    { ISD::TRUNCATE,  MVT::v16i32, MVT::v8i64,  4 },

    // v16i1 -> v16i32 - load + broadcast
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i1,  2 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i1,  2 },

    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i8,  1 },
    { ISD::SIGN_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i32, MVT::v16i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },
    { ISD::ZERO_EXTEND, MVT::v8i64,  MVT::v16i32, 3 },

    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i8,  2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i16, 2 },
    { ISD::SINT_TO_FP,  MVT::v16f32, MVT::v16i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i1,   4 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i16,  2 },
    { ISD::SINT_TO_FP,  MVT::v8f64,  MVT::v8i32,  1 },
  };
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVX2ConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 1 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 1 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 1 },

    { ISD::TRUNCATE,    MVT::v4i8,   MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i16,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v4i32,  MVT::v4i64, 2 },
    { ISD::TRUNCATE,    MVT::v8i8,   MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i16,  MVT::v8i32, 2 },
    { ISD::TRUNCATE,    MVT::v8i32,  MVT::v8i64, 4 },

    { ISD::FP_EXTEND,   MVT::v8f64,  MVT::v8f32, 3 },
    { ISD::FP_ROUND,    MVT::v8f32,  MVT::v8f64, 3 },

    { ISD::UINT_TO_FP,  MVT::v8f32,  MVT::v8i32, 8 },
  };
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  AVXConversionTbl[] = {
    { ISD::SIGN_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::ZERO_EXTEND, MVT::v16i16, MVT::v16i8, 4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i1,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i8,  7 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::ZERO_EXTEND, MVT::v8i32,  MVT::v8i16, 4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i1,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i1,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i8,  6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i8,  4 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i16, 6 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i16, 3 },
    { ISD::SIGN_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },
    { ISD::ZERO_EXTEND, MVT::v4i64,  MVT::v4i32, 4 },

    { ISD::TRUNCATE,    MVT::v4i8,  MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i16, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v4i32, MVT::v4i64,  4 },
    { ISD::TRUNCATE,    MVT::v8i8,  MVT::v8i32,  4 },
    { ISD::TRUNCATE,    MVT::v8i16, MVT::v8i32,  5 },
    { ISD::TRUNCATE,    MVT::v16i8, MVT::v16i16, 4 },
    { ISD::TRUNCATE,    MVT::v8i32, MVT::v8i64,  9 },

    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i1,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i8,  8 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::SINT_TO_FP,  MVT::v8f32, MVT::v8i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f32, MVT::v4i32, 1 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i1,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i8,  3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i16, 3 },
    { ISD::SINT_TO_FP,  MVT::v4f64, MVT::v4i32, 1 },

    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i1,  6 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i8,  5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i16, 5 },
    { ISD::UINT_TO_FP,  MVT::v8f32, MVT::v8i32, 9 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f32, MVT::v4i32, 6 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i1,  7 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i8,  2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i16, 2 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i32, 6 },
    // The generic code to compute the scalar overhead is currently broken.
    // Work around this limitation by estimating the scalarization overhead
    // here. We have roughly 10 instructions per scalar element.
    // Multiply that by the vector width.
    // FIXME: remove that when PR19268 is fixed.
    { ISD::UINT_TO_FP,  MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP,  MVT::v4f64, MVT::v4i64, 4*10 },

    { ISD::FP_TO_SINT,  MVT::v8i8,  MVT::v8f32, 7 },
    { ISD::FP_TO_SINT,  MVT::v4i8,  MVT::v4f32, 1 },
    // This node is expanded into scalarized operations but BasicTTI is overly
    // optimistic in estimating its cost. It computes 3 per element (one
    // vector-extract, one scalar conversion and one vector-insert). The
    // problem is that the inserts form a read-modify-write chain so latency
    // should be factored in too. Inflating the cost per element by 1.
    { ISD::FP_TO_UINT,  MVT::v8i32, MVT::v8f32, 8*4 },
    { ISD::FP_TO_UINT,  MVT::v4i32, MVT::v4f64, 4*4 },
  };
  static const TypeConversionCostTblEntry<MVT::SimpleValueType>
  SSE2ConvTbl[] = {
    // These are somewhat magic numbers justified by looking at the output of
    // Intel's IACA, running some kernels and making sure when we take
    // legalization into account the throughput will be overestimated.
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::UINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v2i64, 2*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v4i32, 4*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v8i16, 8*10 },
    { ISD::SINT_TO_FP, MVT::v2f64, MVT::v16i8, 16*10 },
    // There are faster sequences for float conversions.
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v4i32, 8 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::UINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v2i64, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v4i32, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v8i16, 15 },
    { ISD::SINT_TO_FP, MVT::v4f32, MVT::v16i8, 8 },
  };
  std::pair<int, MVT> LTSrc = TLI->getTypeLegalizationCost(DL, Src);
  std::pair<int, MVT> LTDest = TLI->getTypeLegalizationCost(DL, Dst);

  if (ST->hasSSE2() && !ST->hasAVX()) {
    int Idx =
        ConvertCostTableLookup(SSE2ConvTbl, ISD, LTDest.second, LTSrc.second);
    if (Idx != -1)
      return LTSrc.first * SSE2ConvTbl[Idx].Cost;
  }

  if (ST->hasAVX512()) {
    int Idx = ConvertCostTableLookup(AVX512ConversionTbl, ISD, LTDest.second,
                                     LTSrc.second);
    if (Idx != -1)
      return AVX512ConversionTbl[Idx].Cost;
  }
  EVT SrcTy = TLI->getValueType(DL, Src);
  EVT DstTy = TLI->getValueType(DL, Dst);

  // The function getSimpleVT only handles simple value types.
  if (!SrcTy.isSimple() || !DstTy.isSimple())
    return BaseT::getCastInstrCost(Opcode, Dst, Src);

  if (ST->hasAVX2()) {
    int Idx = ConvertCostTableLookup(AVX2ConversionTbl, ISD,
                                     DstTy.getSimpleVT(), SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVX2ConversionTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = ConvertCostTableLookup(AVXConversionTbl, ISD, DstTy.getSimpleVT(),
                                     SrcTy.getSimpleVT());
    if (Idx != -1)
      return AVXConversionTbl[Idx].Cost;
  }

  return BaseT::getCastInstrCost(Opcode, Dst, Src);
}
int X86TTIImpl::getCmpSelInstrCost(unsigned Opcode, Type *ValTy, Type *CondTy) {
  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");
  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTbl[] = {
    { ISD::SETCC, MVT::v2f64,  1 },
    { ISD::SETCC, MVT::v4f32,  1 },
    { ISD::SETCC, MVT::v2i64,  1 },
    { ISD::SETCC, MVT::v4i32,  1 },
    { ISD::SETCC, MVT::v8i16,  1 },
    { ISD::SETCC, MVT::v16i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTbl[] = {
    { ISD::SETCC, MVT::v4f64,  1 },
    { ISD::SETCC, MVT::v8f32,  1 },
    // AVX1 does not support 8-wide integer compare.
    { ISD::SETCC, MVT::v4i64,  4 },
    { ISD::SETCC, MVT::v8i32,  4 },
    { ISD::SETCC, MVT::v16i16, 4 },
    { ISD::SETCC, MVT::v32i8,  4 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX2CostTbl[] = {
    { ISD::SETCC, MVT::v4i64,  1 },
    { ISD::SETCC, MVT::v8i32,  1 },
    { ISD::SETCC, MVT::v16i16, 1 },
    { ISD::SETCC, MVT::v32i8,  1 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX512CostTbl[] = {
    { ISD::SETCC, MVT::v8i64,  1 },
    { ISD::SETCC, MVT::v16i32, 1 },
    { ISD::SETCC, MVT::v8f64,  1 },
    { ISD::SETCC, MVT::v16f32, 1 },
  };
  if (ST->hasAVX512()) {
    int Idx = CostTableLookup(AVX512CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX512CostTbl[Idx].Cost;
  }

  if (ST->hasAVX2()) {
    int Idx = CostTableLookup(AVX2CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX2CostTbl[Idx].Cost;
  }

  if (ST->hasAVX()) {
    int Idx = CostTableLookup(AVX1CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * AVX1CostTbl[Idx].Cost;
  }

  if (ST->hasSSE42()) {
    int Idx = CostTableLookup(SSE42CostTbl, ISD, MTy);
    if (Idx != -1)
      return LT.first * SSE42CostTbl[Idx].Cost;
  }

  return BaseT::getCmpSelInstrCost(Opcode, ValTy, CondTy);
}
int X86TTIImpl::getVectorInstrCost(unsigned Opcode, Type *Val, unsigned Index) {
  assert(Val->isVectorTy() && "This must be a vector type");

  if (Index != -1U) {
    // Legalize the type.
    std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Val);

    // This type is legalized to a scalar type.
    if (!LT.second.isVector())
      return 0;

    // The type may be split. Normalize the index to the new type.
    unsigned Width = LT.second.getVectorNumElements();
    Index = Index % Width;
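    // For example, extracting element 5 of an <8 x i32> that legalizes to two
    // v4i32 parts maps to element 1 of the second part.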
    // Floating point scalars are already located in index #0.
    if (Val->getScalarType()->isFloatingPointTy() && Index == 0)
      return 0;
  }

  return BaseT::getVectorInstrCost(Opcode, Val, Index);
}
int X86TTIImpl::getScalarizationOverhead(Type *Ty, bool Insert, bool Extract) {
  assert(Ty->isVectorTy() && "Can only scalarize vectors");
  int Cost = 0;

  for (int i = 0, e = Ty->getVectorNumElements(); i < e; ++i) {
    if (Insert)
      Cost += getVectorInstrCost(Instruction::InsertElement, Ty, i);
    if (Extract)
      Cost += getVectorInstrCost(Instruction::ExtractElement, Ty, i);
  }

  return Cost;
}
int X86TTIImpl::getMemoryOpCost(unsigned Opcode, Type *Src, unsigned Alignment,
                                unsigned AddressSpace) {
  // Handle non-power-of-two vectors such as <3 x float>.
  if (VectorType *VTy = dyn_cast<VectorType>(Src)) {
    unsigned NumElem = VTy->getVectorNumElements();

    // Handle a few common cases:
    // <3 x float>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 32)
      // Cost = 64 bit store + extract + 32 bit store.
      return 3;

    // <3 x double>
    if (NumElem == 3 && VTy->getScalarSizeInBits() == 64)
      // Cost = 128 bit store + unpack + 64 bit store.
      return 3;

    // Assume that all other non-power-of-two numbers are scalarized.
    if (!isPowerOf2_32(NumElem)) {
      int Cost = BaseT::getMemoryOpCost(Opcode, VTy->getScalarType(), Alignment,
                                        AddressSpace);
      int SplitCost = getScalarizationOverhead(Src, Opcode == Instruction::Load,
                                               Opcode == Instruction::Store);
      return NumElem * Cost + SplitCost;
    }
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, Src);
  assert((Opcode == Instruction::Load || Opcode == Instruction::Store) &&
         "Invalid Opcode");

  // Each load/store unit costs 1.
  int Cost = LT.first * 1;

  // On Sandybridge 256-bit load/stores are double pumped
  // (but not on Haswell).
  if (LT.second.getSizeInBits() > 128 && !ST->hasAVX2())
    Cost *= 2;

  return Cost;
}
int X86TTIImpl::getMaskedMemoryOpCost(unsigned Opcode, Type *SrcTy,
                                      unsigned Alignment,
                                      unsigned AddressSpace) {
  VectorType *SrcVTy = dyn_cast<VectorType>(SrcTy);
  if (!SrcVTy)
    // For a scalar, take the regular cost, without the mask.
    return getMemoryOpCost(Opcode, SrcTy, Alignment, AddressSpace);

  unsigned NumElem = SrcVTy->getVectorNumElements();
  VectorType *MaskTy =
      VectorType::get(Type::getInt8Ty(getGlobalContext()), NumElem);
  if ((Opcode == Instruction::Load && !isLegalMaskedLoad(SrcVTy)) ||
      (Opcode == Instruction::Store && !isLegalMaskedStore(SrcVTy)) ||
      !isPowerOf2_32(NumElem)) {
    // Scalarization.
    int MaskSplitCost = getScalarizationOverhead(MaskTy, false, true);
    int ScalarCompareCost = getCmpSelInstrCost(
        Instruction::ICmp, Type::getInt8Ty(getGlobalContext()), nullptr);
    int BranchCost = getCFInstrCost(Instruction::Br);
    int MaskCmpCost = NumElem * (BranchCost + ScalarCompareCost);
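    // The scalarized form tests each mask lane and branches around that
    // lane's memory access, hence one compare plus one branch per element on
    // top of the per-element loads/stores accounted for below.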
    int ValueSplitCost = getScalarizationOverhead(
        SrcVTy, Opcode == Instruction::Load, Opcode == Instruction::Store);
    int MemopCost =
        NumElem * BaseT::getMemoryOpCost(Opcode, SrcVTy->getScalarType(),
                                         Alignment, AddressSpace);
    return MemopCost + ValueSplitCost + MaskSplitCost + MaskCmpCost;
  }

  // Legalize the type.
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, SrcVTy);
  int Cost = 0;
  if (LT.second != TLI->getValueType(DL, SrcVTy).getSimpleVT() &&
      LT.second.getVectorNumElements() == NumElem)
    // Promotion requires an expand/truncate for the data and a shuffle for
    // the mask.
    Cost += getShuffleCost(TTI::SK_Alternate, SrcVTy, 0, nullptr) +
            getShuffleCost(TTI::SK_Alternate, MaskTy, 0, nullptr);

  else if (LT.second.getVectorNumElements() > NumElem) {
    VectorType *NewMaskTy = VectorType::get(MaskTy->getVectorElementType(),
                                            LT.second.getVectorNumElements());
    // Expanding requires filling the mask with zeroes.
    Cost += getShuffleCost(TTI::SK_InsertSubvector, NewMaskTy, 0, MaskTy);
  }

  if (!ST->hasAVX512())
    return Cost + LT.first * 4; // Each maskmov costs 4.

  // AVX-512 masked load/store is cheaper.
  return Cost + LT.first;
}
int X86TTIImpl::getAddressComputationCost(Type *Ty, bool IsComplex) {
  // Address computations in vectorized code with non-consecutive addresses
  // will likely result in more instructions compared to scalar code where
  // the computation can more often be merged into the index mode. The
  // resulting extra micro-ops can significantly decrease throughput.
  unsigned NumVectorInstToHideOverhead = 10;

  if (Ty->isVectorTy() && IsComplex)
    return NumVectorInstToHideOverhead;

  return BaseT::getAddressComputationCost(Ty, IsComplex);
}
int X86TTIImpl::getReductionCost(unsigned Opcode, Type *ValTy,
                                 bool IsPairwise) {
  std::pair<int, MVT> LT = TLI->getTypeLegalizationCost(DL, ValTy);

  MVT MTy = LT.second;

  int ISD = TLI->InstructionOpcodeToISD(Opcode);
  assert(ISD && "Invalid opcode");

  // We use the Intel Architecture Code Analyzer (IACA) to measure the
  // throughput and use that as the cost.

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v8i16, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblPairWise[] = {
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::FADD, MVT::v4f64, 5 },
    { ISD::FADD, MVT::v8f32, 7 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.5".
    { ISD::ADD,  MVT::v4i64, 5 }, // The data reported by the IACA tool is "4.8".
    { ISD::ADD,  MVT::v8i16, 5 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  static const CostTblEntry<MVT::SimpleValueType> SSE42CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v2f64, 2 },
    { ISD::FADD, MVT::v4f32, 4 },
    { ISD::ADD,  MVT::v2i64, 2 }, // The data reported by the IACA tool is "1.6".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "3.3".
    { ISD::ADD,  MVT::v8i16, 4 }, // The data reported by the IACA tool is "4.3".
  };

  static const CostTblEntry<MVT::SimpleValueType> AVX1CostTblNoPairWise[] = {
    { ISD::FADD, MVT::v4f32, 3 },
    { ISD::FADD, MVT::v4f64, 3 },
    { ISD::FADD, MVT::v8f32, 4 },
    { ISD::ADD,  MVT::v2i64, 1 }, // The data reported by the IACA tool is "1.5".
    { ISD::ADD,  MVT::v4i32, 3 }, // The data reported by the IACA tool is "2.8".
    { ISD::ADD,  MVT::v4i64, 3 },
    { ISD::ADD,  MVT::v8i16, 4 },
    { ISD::ADD,  MVT::v8i32, 5 },
  };

  if (IsPairwise) {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblPairWise[Idx].Cost;
    }
  } else {
    if (ST->hasAVX()) {
      int Idx = CostTableLookup(AVX1CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * AVX1CostTblNoPairWise[Idx].Cost;
    }

    if (ST->hasSSE42()) {
      int Idx = CostTableLookup(SSE42CostTblNoPairWise, ISD, MTy);
      if (Idx != -1)
        return LT.first * SSE42CostTblNoPairWise[Idx].Cost;
    }
  }

  return BaseT::getReductionCost(Opcode, ValTy, IsPairwise);
}
/// \brief Calculate the cost of materializing a 64-bit value. This helper
/// method might only calculate a fraction of a larger immediate. Therefore it
/// is valid to return a cost of ZERO.
int X86TTIImpl::getIntImmCost(int64_t Val) {
  if (Val == 0)
    return TTI::TCC_Free;

  if (isInt<32>(Val))
    return TTI::TCC_Basic;

  return 2 * TTI::TCC_Basic;
}
int X86TTIImpl::getIntImmCost(const APInt &Imm, Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  if (BitSize == 0)
    return ~0U;

  // Never hoist constants larger than 128 bits, because this might lead to
  // incorrect code generation or assertions in codegen.
  // FIXME: Create a cost model for types larger than i128 once the codegen
  // issues have been fixed.
  if (BitSize > 128)
    return TTI::TCC_Free;

  if (Imm == 0)
    return TTI::TCC_Free;

  // Sign-extend all constants to a multiple of 64 bits.
  APInt ImmVal = Imm;
  if (BitSize & 0x3f)
    ImmVal = Imm.sext((BitSize + 63) & ~0x3fU);
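  // For example, an i33 immediate is sign-extended to 64 bits here, while an
  // i128 immediate is costed below as two independent 64-bit chunks.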
  // Split the constant into 64-bit chunks and calculate the cost for each
  // chunk.
  int Cost = 0;
  for (unsigned ShiftVal = 0; ShiftVal < BitSize; ShiftVal += 64) {
    APInt Tmp = ImmVal.ashr(ShiftVal).sextOrTrunc(64);
    int64_t Val = Tmp.getSExtValue();
    Cost += getIntImmCost(Val);
  }
  // We need at least one instruction to materialize the constant.
  return std::max(1, Cost);
}
int X86TTIImpl::getIntImmCost(unsigned Opcode, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  unsigned ImmIdx = ~0U;
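  // ImmIdx records which operand position, if any, of this opcode can encode
  // an immediate directly; ~0U means no operand is treated that way.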
  switch (Opcode) {
  default:
    return TTI::TCC_Free;
  case Instruction::GetElementPtr:
    // Always hoist the base address of a GetElementPtr. This prevents the
    // creation of new constants for every base constant that gets constant
    // folded with the offset.
    if (Idx == 0)
      return 2 * TTI::TCC_Basic;
    return TTI::TCC_Free;
  case Instruction::Store:
    ImmIdx = 0;
    break;
  case Instruction::And:
    // We support 64-bit ANDs with immediates with 32-bits of leading zeroes
    // by using a 32-bit operation with implicit zero extension. Detect such
    // immediates here as the normal path expects bit 31 to be sign extended.
    if (Idx == 1 && Imm.getBitWidth() == 64 && isUInt<32>(Imm.getZExtValue()))
      return TTI::TCC_Free;
    // Fallthrough
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::UDiv:
  case Instruction::SDiv:
  case Instruction::URem:
  case Instruction::SRem:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::ICmp:
    ImmIdx = 1;
    break;
  // Always return TCC_Free for the shift value of a shift instruction.
  case Instruction::Shl:
  case Instruction::LShr:
  case Instruction::AShr:
    if (Idx == 1)
      return TTI::TCC_Free;
    break;
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::IntToPtr:
  case Instruction::PtrToInt:
  case Instruction::BitCast:
  case Instruction::PHI:
  case Instruction::Call:
  case Instruction::Select:
  case Instruction::Ret:
  case Instruction::Load:
    break;
  }

  if (Idx == ImmIdx) {
    int NumConstants = (BitSize + 63) / 64;
    int Cost = X86TTIImpl::getIntImmCost(Imm, Ty);
    return (Cost <= NumConstants * TTI::TCC_Basic)
               ? static_cast<int>(TTI::TCC_Free)
               : Cost;
  }

  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
int X86TTIImpl::getIntImmCost(Intrinsic::ID IID, unsigned Idx, const APInt &Imm,
                              Type *Ty) {
  assert(Ty->isIntegerTy());

  unsigned BitSize = Ty->getPrimitiveSizeInBits();
  // There is no cost model for constants with a bit size of 0. Return TCC_Free
  // here, so that constant hoisting will ignore this constant.
  if (BitSize == 0)
    return TTI::TCC_Free;

  switch (IID) {
  default:
    return TTI::TCC_Free;
  case Intrinsic::sadd_with_overflow:
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::ssub_with_overflow:
  case Intrinsic::usub_with_overflow:
  case Intrinsic::smul_with_overflow:
  case Intrinsic::umul_with_overflow:
    if ((Idx == 1) && Imm.getBitWidth() <= 64 && isInt<32>(Imm.getSExtValue()))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_stackmap:
    if ((Idx < 2) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    if ((Idx < 4) || (Imm.getBitWidth() <= 64 && isInt<64>(Imm.getSExtValue())))
      return TTI::TCC_Free;
    break;
  }
  return X86TTIImpl::getIntImmCost(Imm, Ty);
}
bool X86TTIImpl::isLegalMaskedLoad(Type *DataTy) {
  Type *ScalarTy = DataTy->getScalarType();
  int DataWidth = ScalarTy->isPointerTy() ? DL.getPointerSizeInBits()
                                          : ScalarTy->getPrimitiveSizeInBits();
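  // AVX2's masked moves (vpmaskmovd/vpmaskmovq, vmaskmovps/vmaskmovpd) only
  // handle 32- and 64-bit elements, hence the DataWidth requirement below.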
  return (DataWidth >= 32 && ST->hasAVX2());
}

bool X86TTIImpl::isLegalMaskedStore(Type *DataType) {
  return isLegalMaskedLoad(DataType);
}
bool X86TTIImpl::areInlineCompatible(const Function *Caller,
                                     const Function *Callee) const {
  const TargetMachine &TM = getTLI()->getTargetMachine();

  // Work this as a subsetting of subtarget features.
  const FeatureBitset &CallerBits =
      TM.getSubtargetImpl(*Caller)->getFeatureBits();
  const FeatureBitset &CalleeBits =
      TM.getSubtargetImpl(*Callee)->getFeatureBits();

  // FIXME: This is likely too limiting as it will include subtarget features
  // that we might not care about for inlining, but it is conservatively
  // correct.
  return (CallerBits & CalleeBits) == CalleeBits;
}