//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Instructions.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
#include <map>
#include <vector>

namespace llvm {
  class CallInst;
  class FunctionLoweringInfo;
  class ImmutableCallSite;
  class IntrinsicInst;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineJumpTableInfo;
  class MachineLoop;
  template<typename T> class SmallVectorImpl;
  class TargetRegisterClass;
  class TargetRegisterInfo;
  class TargetLibraryInfo;
  class TargetLoweringObjectFile;

namespace Sched {
  enum Preference {
    None,        // No preference
    Source,      // Follow source order.
    RegPressure, // Scheduling for lowest register pressure.
    Hybrid,      // Scheduling for both latency and register pressure.
    ILP,         // Scheduling for ILP in low register pressure mode.
    VLIW         // Scheduling for VLIW targets.
  };
}

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
  TargetLoweringBase(const TargetLoweringBase&) = delete;
  void operator=(const TargetLoweringBase&) = delete;

public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction : uint8_t {
    Legal,   // The target natively supports this operation.
    Promote, // This operation should be executed in a larger type.
    Expand,  // Try to expand this to other ops, otherwise use a libcall.
    LibCall, // Don't try to expand this to other ops, always use a libcall.
    Custom   // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it valid.
  enum LegalizeTypeAction : uint8_t {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type,
                         // if an operation is not supported in target HW.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector,     // This vector should be widened into a larger vector.
    TypePromoteFloat     // Replace this float with a larger one.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to EVT
  /// in order to type-legalize it.
  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts, the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  /// Enum that specifies what an atomic load/AtomicRMWInst is expanded
  /// to, if at all. Exists because different targets have different levels of
  /// support for these atomic instructions, and also have different options
  /// w.r.t. what they should expand to.
  enum class AtomicExpansionKind {
    None,    // Don't expand the instruction.
    LLSC,    // Expand the instruction into load-linked/store-conditional; used
             // by ARM/AArch64.
    LLOnly,  // Expand the (load) instruction into just a load-linked, which has
             // greater atomic guarantees than a normal load.
    CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
  };

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }
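
  // For example, on a target that reports ZeroOrNegativeOneBooleanContent for
  // vectors, a vector SETCC result widened from vXi1 is sign-extended, so
  // "true" lanes become all-ones masks:
  //   getExtendForContent(ZeroOrNegativeOneBooleanContent) == ISD::SIGN_EXTEND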

  /// NOTE: The TargetMachine owns TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM);
  virtual ~TargetLoweringBase() {}

protected:
  /// \brief Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }

  virtual bool useSoftFloat() const { return false; }

  /// Return the pointer type for the given address space, defaults to
  /// the pointer type from the data layout.
  /// FIXME: The default needs to be removed once all the code is updated.
  MVT getPointerTy(const DataLayout &DL, uint32_t AS = 0) const {
    return MVT::getIntegerVT(DL.getPointerSizeInBits(AS));
  }

  /// EVT is not used in-tree, but is used by out-of-tree targets.
  /// Documentation for this function would be nice...
  virtual MVT getScalarShiftAmountTy(const DataLayout &, EVT) const;

  EVT getShiftAmountTy(EVT LHSTy, const DataLayout &DL) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR.
  virtual MVT getVectorIdxTy(const DataLayout &DL) const {
    return getPointerTy(DL);
  }

  /// Return true if the select operation is expensive for this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if multiple condition registers are available.
  bool hasMultipleConditionRegisters() const {
    return HasMultipleConditionRegisters;
  }

  /// Return true if the target has BitExtract instructions.
  bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }

  /// Return the preferred vector type legalization action.
  virtual TargetLoweringBase::LegalizeTypeAction
  getPreferredVectorAction(EVT VT) const {
    // The default action for one-element vectors is to scalarize.
    if (VT.getVectorNumElements() == 1)
      return TypeScalarizeVector;
    // The default action for other vectors is to promote.
    return TypePromoteInteger;
  }

  // There are two general methods for expanding a BUILD_VECTOR node:
  //  1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
  //     them together.
  //  2. Build the vector on the stack and then load it.
  // If this function returns true, then method (1) will be used, subject to
  // the constraint that all of the necessary shuffles are legal (as determined
  // by isShuffleMaskLegal). If this function returns false, then method (2) is
  // always used. The vector type, and the number of defined values, are
  // provided.
  virtual bool
  shouldExpandBuildVectorWithShuffles(EVT /* VT */,
                                      unsigned DefinedValues) const {
    return DefinedValues < 3;
  }
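
  // With the default threshold above, a BUILD_VECTOR with one or two defined
  // operands is expanded via SCALAR_TO_VECTOR plus shuffles, while one with
  // three or more defined operands is built on the stack and reloaded.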

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  /// The definition of "cheaper" may depend on whether we're optimizing
  /// for speed or for size.
  virtual bool isIntDivCheap(EVT VT, AttributeSet Attr) const {
    return false;
  }

  /// Return true if sqrt(x) is as cheap or cheaper than 1 / rsqrt(x).
  bool isFsqrtCheap() const {
    return FsqrtIsCheap;
  }

  /// Returns true if target has indicated at least one type should be bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns map of slow types for division or remainder with corresponding
  /// fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if Flow Control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  /// isLoadBitCastBeneficial() - Return true if the following transform
  /// is profitable:
  /// fold (conv (load x)) -> (load (conv*)x)
  /// On architectures that don't natively support some vector loads
  /// efficiently, casting the load to a smaller vector of larger types and
  /// loading is more efficient. However, this can be undone by optimizations
  /// in the DAG combiner.
  virtual bool isLoadBitCastBeneficial(EVT /* Load */,
                                       EVT /* Bitcast */) const {
    return true;
  }

  /// Return true if it is expected to be cheaper to do a store of a non-zero
  /// vector constant with the given size and type for the address space than to
  /// store the individual scalar element constants.
  virtual bool storeOfVectorConstantIsCheap(EVT MemVT,
                                            unsigned NumElem,
                                            unsigned AddrSpace) const {
    return false;
  }

  /// \brief Return true if it is cheap to speculate a call to intrinsic cttz.
  virtual bool isCheapToSpeculateCttz() const {
    return false;
  }

  /// \brief Return true if it is cheap to speculate a call to intrinsic ctlz.
  virtual bool isCheapToSpeculateCtlz() const {
    return false;
  }

  /// \brief Return true if the target supports combining a
  /// chain like:
  /// \code
  ///   %andResult = and %val1, #imm-with-one-bit-set;
  ///   %icmpResult = icmp %andResult, 0
  ///   br i1 %icmpResult, label %dest1, label %dest2
  /// \endcode
  /// into a single machine instruction of a form like:
  /// \code
  ///   brOnBitSet %register, #bitNumber, dest
  /// \endcode
  bool isMaskAndBranchFoldingLegal() const {
    return MaskAndBranchFoldingIsLegal;
  }

  /// \brief Return true if the target wants to use the optimization that
  /// turns ext(promotableInst1(...(promotableInstN(load)))) into
  /// promotedInst1(...(promotedInstN(ext(load)))).
  bool enableExtLdPromotion() const { return EnableExtLdPromotion; }

  /// Return true if the target can combine store(extractelement VectorTy,
  /// Idx).
  /// \p Cost[out] gives the cost of that transformation when this is true.
  virtual bool canCombineStoreAndExtract(Type *VectorTy, Value *Idx,
                                         unsigned &Cost) const {
    return false;
  }

  /// Return true if the target supports floating point exceptions.
  bool hasFloatingPointExceptions() const {
    return HasFloatingPointExceptions;
  }

  /// Return true if the target always benefits from combining into FMA for a
  /// given value type. This must typically return false on targets where FMA
  /// takes more cycles to execute than FADD.
  virtual bool enableAggressiveFMAFusion(EVT VT) const {
    return false;
  }

  /// Return the ValueType of the result of SETCC operations.
  virtual EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                                 EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  virtual
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  ///
  /// Some cpus also treat floating point types the same way as they treat
  /// vectors instead of the way they treat scalars.
  BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
    if (isVec)
      return BooleanVectorContents;
    return isFloat ? BooleanFloatContents : BooleanContents;
  }

  BooleanContent getBooleanContents(EVT Type) const {
    return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
  }
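
  // Illustrative: on a target configured like the X86 example above (scalar
  // booleans zero-extended, vector booleans sign-extended), one would expect
  //   getBooleanContents(MVT::i32)   == ZeroOrOneBooleanContent
  //   getBooleanContents(MVT::v4i32) == ZeroOrNegativeOneBooleanContent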

  /// Return target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 is GR32; on x86_64 it is
  /// GR64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if the target has native support for the specified value type.
  /// This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    LegalizeTypeAction ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions),
                TypeLegal);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      ValueTypeActions[VT.SimpleTy] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to transform to.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }
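
  // Illustrative walk-through on a typical 32-bit target where i64 is not
  // legal: getTypeAction(Ctx, MVT::i64) is TypeExpandInteger, and
  // getTypeToTransformTo(Ctx, MVT::i64) is MVT::i32; getTypeToExpandTo below
  // simply iterates this until it reaches a legal type.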

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
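
  // Usage sketch for the SSE1 example above (assuming v8f32 is illegal but
  // v4f32 is legal):
  //   EVT IntermediateVT; unsigned NumIntermediates; MVT RegisterVT;
  //   unsigned NumRegs = getVectorTypeBreakdown(Ctx, MVT::v8f32,
  //       IntermediateVT, NumIntermediates, RegisterVT);
  //   // NumRegs == 2, IntermediateVT == RegisterVT == v4f32.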

  struct IntrinsicInfo {
    unsigned     opc;     // target opcode
    EVT          memVT;   // memory VT
    const Value* ptrVal;  // value representing memory location
    int          offset;  // offset off of ptrVal
    unsigned     size;    // the size of the memory location
                          // (taken from memVT if zero)
    unsigned     align;   // alignment
    bool         vol;     // is volatile?
    bool         readMem; // reads memory?
    bool         writeMem; // writes memory?

    IntrinsicInfo() : opc(0), ptrVal(nullptr), offset(0), size(0), align(1),
                      vol(false), readMem(false), writeMem(false) {}
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }
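
  // Sketch of a target override (Intrinsic::my_ld is a hypothetical
  // memory-touching intrinsic; fields as documented in IntrinsicInfo above):
  //   bool MyTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
  //                                             const CallInst &I,
  //                                             unsigned Intrinsic) const {
  //     if (Intrinsic == Intrinsic::my_ld) {  // hypothetical intrinsic
  //       Info.opc = ISD::INTRINSIC_W_CHAIN;
  //       Info.memVT = MVT::i32;
  //       Info.ptrVal = I.getArgOperand(0);
  //       Info.align = 4;
  //       Info.readMem = true;
  //       return true;
  //     }
  //     return false;
  //   }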

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    return OpActions[(unsigned)VT.getSimpleVT().SimpleTy][Op];
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, EVT ValVT,
                                  EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValI < MVT::LAST_VALUETYPE &&
           MemI < MVT::LAST_VALUETYPE && "Table isn't big enough!");
    return LoadExtActions[ValI][MemI][ExtType];
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return ValVT.isSimple() && MemVT.isSimple() &&
      getLoadExtAction(ExtType, ValVT, MemVT) == Legal;
  }

  /// Return true if the specified load with extension is legal or custom
  /// on this target.
  bool isLoadExtLegalOrCustom(unsigned ExtType, EVT ValVT, EVT MemVT) const {
    return ValVT.isSimple() && MemVT.isSimple() &&
      (getLoadExtAction(ExtType, ValVT, MemVT) == Legal ||
       getLoadExtAction(ExtType, ValVT, MemVT) == Custom);
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
    if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
    unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
    unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
    assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return TruncStoreActions[ValI][MemI];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT.isValid() &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           ((unsigned)VT.SimpleTy >> 3) < array_lengthof(CondCodeActions[0]) &&
           "Table isn't big enough!");
    // See setCondCodeAction for how this is encoded.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 3];
    LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0xF);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return
      getCondCodeAction(CC, VT) == Legal ||
      getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
              getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
  /// operations except for the pointer size. If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(const DataLayout &DL, Type *Ty,
                   bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(DL, PTy->getAddressSpace());

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerTy(DL, PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }

      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getNumElements());
    }
    return EVT::getEVT(Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(const DataLayout &DL, Type *Ty,
                         bool AllowUnknown = false) const {
    return getValueType(DL, Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal or InAlloca aggregate function
  /// arguments in the caller parameter area. This is the actual alignment, not
  /// its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty, const DataLayout &DL) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces. For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type. For an i140
  /// on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  // Return true if it is profitable to reduce the given load node to a smaller
  // type.
  //
  // e.g. (i16 (trunc (i32 (load x)))) -> i16 load x should be performed
  virtual bool shouldReduceLoadWidth(SDNode *Load,
                                     ISD::LoadExtType ExtTy,
                                     EVT NewVT) const {
    return true;
  }

  /// When splitting a value of the specified type into parts, does the Lo
  /// or Hi part come first? This usually follows the endianness, except
  /// for ppcf128, where the Hi part always comes first.
  bool hasBigEndianPartOrdering(EVT VT, const DataLayout &DL) const {
    return DL.isBigEndian() || VT == MVT::ppcf128;
  }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  unsigned getGatherAllAliasesMaxDepth() const {
    return GatherAllAliasesMaxDepth;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// \brief Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type in the given address space. If true, it also returns
  /// whether the unaligned memory access is "fast" in the last argument by
  /// reference. This is used, for example, in situations where an array
  /// copy/move/set is converted to a sequence of store operations. Its use
  /// helps to ensure that such replacements don't generate code that causes an
  /// alignment error (trap) on the target machine.
  virtual bool allowsMisalignedMemoryAccesses(EVT,
                                              unsigned AddrSpace = 0,
                                              unsigned Align = 1,
                                              bool * /*Fast*/ = nullptr) const {
    return false;
  }

  /// Return true if the target supports a memory access of this type for the
  /// given address space and alignment. If the access is allowed, the optional
  /// final parameter returns if the access is also fast (as defined by the
  /// target).
  bool allowsMemoryAccess(LLVMContext &Context, const DataLayout &DL, EVT VT,
                          unsigned AddrSpace = 0, unsigned Alignment = 1,
                          bool *Fast = nullptr) const;

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, that means the destination alignment can satisfy any
  /// constraint. Similarly if SrcAlign is zero it means there isn't
  /// a need to check it against alignment requirement, probably because the
  /// source does not need to be loaded. If 'IsMemset' is true, that means it's
  /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
  /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
  /// does not need to be loaded. It returns EVT::Other if the type should be
  /// determined using generic target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsMemset*/,
                                  bool /*ZeroMemset*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return integer threshold on number of blocks to use jump tables rather
  /// than an if sequence.
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }

  /// If a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  virtual unsigned
  getExceptionPointerRegister(const Constant *PersonalityFn) const {
    // 0 is guaranteed to be the NoRegister value on all targets.
    return 0;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  virtual unsigned
  getExceptionSelectorRegister(const Constant *PersonalityFn) const {
    // 0 is guaranteed to be the NoRegister value on all targets.
    return 0;
  }

  /// Returns the target's jmp_buf size in bytes (if never set, the default is
  /// 200).
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
  /// is 0).
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// Return the minimum stack alignment of an argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// Return the preferred function alignment.
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// Return the preferred loop alignment.
  virtual unsigned getPrefLoopAlignment(MachineLoop *ML = nullptr) const {
    return PrefLoopAlignment;
  }

  /// Return whether the DAG builder should automatically insert fences and
  /// reduce ordering for atomics.
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }

  /// Return true if the target stores stack protector cookies at a fixed offset
  /// in some non-standard address space, and populates the address space and
  /// offset as appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// If the target has a standard location for the unsafe stack pointer,
  /// returns the address of that location. Otherwise, returns nullptr.
  virtual Value *getSafeStackPointerLocation(IRBuilder<> &IRB) const;

  /// Returns true if a cast between SrcAS and DestAS is a noop.
  virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
    return false;
  }

  /// Return true if the pointer arguments to CI should be aligned by aligning
  /// the object whose address is being passed. If so then MinSize is set to the
  /// minimum size the object must be to be aligned and PrefAlign is set to the
  /// preferred alignment.
  virtual bool shouldAlignPointerArgs(CallInst * /*CI*/, unsigned & /*MinSize*/,
                                      unsigned & /*PrefAlign*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<int, MVT> getTypeLegalizationCost(const DataLayout &DL,
                                              Type *Ty) const;

  /// @}

  //===--------------------------------------------------------------------===//
  /// \name Helpers for atomic expansion.
  /// @{

  /// Perform a load-linked operation on Addr, returning a "Value *" with the
  /// corresponding pointee type. This may entail some non-trivial operations to
  /// truncate or reconstruct types that will be illegal in the backend. See
  /// ARMISelLowering for an example implementation.
  virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
                                AtomicOrdering Ord) const {
    llvm_unreachable("Load linked unimplemented on this target");
  }

  /// Perform a store-conditional operation to Addr. Return the status of the
  /// store. This should be 0 if the store succeeded, non-zero otherwise.
  virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
                                      Value *Addr, AtomicOrdering Ord) const {
    llvm_unreachable("Store conditional unimplemented on this target");
  }

  /// Inserts in the IR a target-specific intrinsic specifying a fence.
  /// It is called by AtomicExpandPass before expanding an
  /// AtomicRMW/AtomicCmpXchg/AtomicStore/AtomicLoad.
  /// RMW and CmpXchg set both IsStore and IsLoad to true.
  /// This function should either return a nullptr, or a pointer to an IR-level
  /// Instruction*. Even complex fence sequences can be represented by a
  /// single Instruction* through an intrinsic to be lowered later.
  /// Backends with !getInsertFencesForAtomic() should keep a no-op here.
  /// Backends should override this method to produce target-specific intrinsics
  /// for their fences.
  /// FIXME: Please note that the default implementation here in terms of
  /// IR-level fences exists for historical/compatibility reasons and is
  /// *unsound*! Fences cannot, in general, be used to restore sequential
  /// consistency. For example, consider the following:
  /// atomic<int> x = y = 0;
  /// int r1, r2, r3, r4;
  /// Thread 0:
  ///   x.store(1);
  /// Thread 1:
  ///   y.store(1);
  /// Thread 2:
  ///   r1 = x.load();
  ///   r2 = y.load();
  /// Thread 3:
  ///   r3 = y.load();
  ///   r4 = x.load();
  /// r1 = r3 = 1 and r2 = r4 = 0 is impossible as long as the accesses are all
  /// seq_cst. But if they are lowered to monotonic accesses, no amount of
  /// IR-level fences can prevent it.
  /// @{
  virtual Instruction *emitLeadingFence(IRBuilder<> &Builder,
                                        AtomicOrdering Ord, bool IsStore,
                                        bool IsLoad) const {
    if (!getInsertFencesForAtomic())
      return nullptr;

    if (isAtLeastRelease(Ord) && IsStore)
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }

  virtual Instruction *emitTrailingFence(IRBuilder<> &Builder,
                                         AtomicOrdering Ord, bool IsStore,
                                         bool IsLoad) const {
    if (!getInsertFencesForAtomic())
      return nullptr;

    if (isAtLeastAcquire(Ord))
      return Builder.CreateFence(Ord);
    else
      return nullptr;
  }
  /// @}

  // Emits code that executes when the comparison result in the ll/sc
  // expansion of a cmpxchg instruction is such that the store-conditional will
  // not execute. This makes it possible to balance out the load-linked with
  // a dedicated instruction, if desired.
  // E.g., on ARM, if ldrex isn't followed by strex, the exclusive monitor would
  // be unnecessarily held, except if clrex, inserted by this hook, is executed.
  virtual void emitAtomicCmpXchgNoStoreLLBalance(IRBuilder<> &Builder) const {}

  /// Returns true if the given (atomic) store should be expanded by the
  /// IR-level AtomicExpand pass into an "atomic xchg" which ignores its input.
  virtual bool shouldExpandAtomicStoreInIR(StoreInst *SI) const {
    return false;
  }

  /// Returns true if arguments should be sign-extended in lib calls.
  virtual bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const {
    return IsSigned;
  }

  /// Returns how the given (atomic) load should be expanded by the
  /// IR-level AtomicExpand pass.
  virtual AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const {
    return AtomicExpansionKind::None;
  }

  /// Returns true if the given atomic cmpxchg should be expanded by the
  /// IR-level AtomicExpand pass into a load-linked/store-conditional sequence
  /// (through emitLoadLinked() and emitStoreConditional()).
  virtual bool shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
    return false;
  }

  /// Returns how the IR-level AtomicExpand pass should expand the given
  /// AtomicRMW, if at all. Default is to never expand.
  virtual AtomicExpansionKind shouldExpandAtomicRMWInIR(AtomicRMWInst *) const {
    return AtomicExpansionKind::None;
  }

  /// On some platforms, an AtomicRMW that never actually modifies the value
  /// (such as fetch_add of 0) can be turned into a fence followed by an
  /// atomic load. This may sound useless, but it makes it possible for the
  /// processor to keep the cacheline shared, dramatically improving
  /// performance. And such idempotent RMWs are useful for implementing some
  /// kinds of locks; see for example (justification + benchmarks):
  /// http://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
  /// This method tries doing that transformation, returning the atomic load if
  /// it succeeds, and nullptr otherwise.
  /// If shouldExpandAtomicLoadInIR returns true on that load, it will undergo
  /// another round of expansion.
  virtual LoadInst *
  lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *RMWI) const {
    return nullptr;
  }

  /// Returns true if we should normalize
  /// select(N0&N1, X, Y) => select(N0, select(N1, X, Y), Y) and
  /// select(N0|N1, X, Y) => select(N0, X, select(N1, X, Y)) if it is likely
  /// that it saves us from materializing N0 and N1 in an integer register.
  /// Targets that are able to perform and/or on flags should return false here.
  virtual bool shouldNormalizeToSelectSequence(LLVMContext &Context,
                                               EVT VT) const {
    // If a target has multiple condition registers, then it likely has logical
    // operations on those registers.
    if (hasMultipleConditionRegisters())
      return false;
    // Only do the transform if the value won't be split into multiple
    // registers.
    LegalizeTypeAction Action = getTypeAction(Context, VT);
    return Action != TypeExpandInteger && Action != TypeExpandFloat &&
      Action != TypeSplitVector;
  }

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

protected:
  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) {
    BooleanContents = Ty;
    BooleanFloatContents = Ty;
  }

  /// Specify how the target extends the result of integer and floating point
  /// boolean values from i1 to a wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
    BooleanContents = IntTy;
    BooleanFloatContents = FloatTy;
  }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type. See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the version without _. Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the version without _. Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// Indicate the number of blocks to generate jump tables rather than an if
  /// sequence.
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// If set to a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// Tells the code generator not to expand operations into sequences that use
  /// the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// Tells the code generator that the target has multiple (allocatable)
  /// condition registers that can be used to store the results of comparisons
  /// for use by selects and conditional branches. With multiple condition
  /// registers, the code generator will not aggressively sink comparisons into
  /// the blocks of their users.
  void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
    HasMultipleConditionRegisters = hasManyRegs;
  }

  /// Tells the code generator that the target has BitExtract instructions.
  /// The code generator will aggressively sink "shift"s into the blocks of
  /// their users if the users will generate "and" instructions which can be
  /// combined with "shift" to BitExtract instructions.
  void setHasExtractBitsInsn(bool hasExtractInsn = true) {
    HasExtractBitsInsn = hasExtractInsn;
  }

  /// Tells the code generator not to expand logic operations on comparison
  /// predicates into separate sequences that increase the amount of flow
  /// control.
  void setJumpIsExpensive(bool isExpensive = true);

  /// Tells the code generator that fsqrt is cheap, and should not be replaced
  /// with an alternative sequence of instructions.
  void setFsqrtIsCheap(bool isCheap = true) { FsqrtIsCheap = isCheap; }

  /// Tells the code generator that this target supports floating point
  /// exceptions and cares about preserving floating point exception behavior.
  void setHasFloatingPointExceptions(bool FPExceptions = true) {
    HasFloatingPointExceptions = FPExceptions;
  }

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }
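
  // Illustrative use from a target constructor: prefer a 32-bit divide over a
  // slow 64-bit one whenever the operands happen to fit in 32 bits:
  //   addBypassSlowDiv(64, 32);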

  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.SimpleTy] = RC;
  }
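
  // Illustrative (GPR32RegClass stands in for a target's generated class):
  //   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);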

  /// Remove all register classes.
  void clearRegisterClasses() {
    std::fill(std::begin(RegClassForVT), std::end(RegClassForVT), nullptr);

    AvailableRegClasses.clear();
  }

  /// \brief Remove all operation actions.
  void clearOperationActions() {
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass *, uint8_t>
  findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties(const TargetRegisterInfo *TRI);

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = Action;
  }
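
  // Typical use from a target's constructor (illustrative opcode/type pairs):
  //   setOperationAction(ISD::SDIV, MVT::i32, Expand);    // use alternatives
  //   setOperationAction(ISD::FSIN, MVT::f64, LibCall);   // always call libm
  //   setOperationAction(ISD::BITCAST, MVT::i64, Custom); // see LowerOperation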

  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT ValVT, MVT MemVT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && ValVT.isValid() &&
           MemVT.isValid() && "Table isn't big enough!");
    LoadExtActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy][ExtType] = Action;
  }
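
  // Illustrative: a target with no native i32 <- i1 sign-extending load:
  //   setLoadExtAction(ISD::SEXTLOAD, MVT::i32, MVT::i1, Promote);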

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT.isValid() && MemVT.isValid() && "Table isn't big enough!");
    TruncStoreActions[(unsigned)ValVT.SimpleTy][MemVT.SimpleTy] = Action;
  }

  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT.isValid() && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT.isValid() && (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    assert((unsigned)Action < 0x10 && "too many bits for bitfield array");
    /// The lower 3 bits of the SimpleTy select the Nth 4-bit group within the
    /// 32-bit value, and the remaining bits index into the second dimension of
    /// the array to select which 32-bit value to use.
    uint32_t Shift = 4 * (VT.SimpleTy & 0x7);
    CondCodeActions[CC][VT.SimpleTy >> 3] &= ~((uint32_t)0xF << Shift);
    CondCodeActions[CC][VT.SimpleTy >> 3] |= (uint32_t)Action << Shift;
  }
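
  // Illustrative: a target with no ordered-not-equal FP compare would mark it
  // for expansion into other compares:
  //   setCondCodeAction(ISD::SETONE, MVT::f32, Expand);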

  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works. If that
  /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }
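
  // Illustrative: promote i8 CTLZ directly to i32 instead of letting the
  // promotion code probe i16 first:
  //   setOperationAction(ISD::CTLZ, MVT::i8, Promote);
  //   AddPromotedToType(ISD::CTLZ, MVT::i8, MVT::i32);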

  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }

  /// Set the target's required jmp_buf buffer size (in bytes); default is 200.
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
  /// 0.
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// Set the target's minimum function alignment (in log2(bytes)).
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }

  /// Set the target's preferred function alignment. This should be set if
  /// there is a performance benefit to higher-than-minimum alignment (in
  /// log2(bytes)).
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// Set the target's preferred loop alignment. The default alignment is zero,
  /// which means the target does not care about loop alignment. The alignment
  /// is specified in log2(bytes). The target may also override
  /// getPrefLoopAlignment to provide per-loop values.
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// Set the minimum stack alignment of an argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }

  /// Set if the DAG builder should automatically insert fences and reduce the
  /// order of atomic memory operations to Monotonic.
  void setInsertFencesForAtomic(bool fence) {
    InsertFencesForAtomic = fence;
  }

public:
  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //===--------------------------------------------------------------------===//

  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
  /// instructions reading the address. This allows as much computation as
  /// possible to be done in the address mode for that operand. This hook lets
  /// targets also pass back when this should be done on intrinsics which
  /// load/store.
  virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value*> &/*Ops*/,
                                    Type *&/*AccessTy*/,
                                    unsigned AddrSpace = 0) const {
    return false;
  }
1456 /// This represents an addressing mode of:
1457 /// BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
1458 /// If BaseGV is null, there is no BaseGV.
1459 /// If BaseOffs is zero, there is no base offset.
1460 /// If HasBaseReg is false, there is no base register.
1461 /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
1462 /// no scale.
1463 struct AddrMode {
1464 GlobalValue *BaseGV;
1465 int64_t BaseOffs;
1466 bool HasBaseReg;
1467 int64_t Scale;
1468 AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
1469 };
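// For illustration, an x86-style address such as [BaseReg + 4*IndexReg + 16]
// would be described roughly as follows (a sketch; DL, Ty and AddrSpace are
// assumed to be in scope):
//   AddrMode AM;
//   AM.BaseOffs = 16;
//   AM.HasBaseReg = true;
//   AM.Scale = 4;            // AM.BaseGV stays null: no global in the address.
//   bool Legal = isLegalAddressingMode(DL, AM, Ty, AddrSpace);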
1471 /// Return true if the addressing mode represented by AM is legal for this
1472 /// target, for a load/store of the specified type.
1474 /// The type may be VoidTy, in which case only return true if the addressing
1475 /// mode is legal for a load/store of any legal type. TODO: Handle
1476 /// pre/postinc as well.
1478 /// If the address space cannot be determined, it will be -1.
1480 /// TODO: Remove default argument
1481 virtual bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
1482 Type *Ty, unsigned AddrSpace) const;
1484 /// \brief Return the cost of the scaling factor used in the addressing mode
1485 /// represented by AM for this target, for a load/store of the specified type.
1487 /// If the AM is supported, the return value must be >= 0.
1488 /// If the AM is not supported, it returns a negative value.
1489 /// TODO: Handle pre/postinc as well.
1490 /// TODO: Remove default argument
1491 virtual int getScalingFactorCost(const DataLayout &DL, const AddrMode &AM,
1492 Type *Ty, unsigned AS = 0) const {
1493 // Default: assume that any scaling factor used in a legal AM is free.
1494 if (isLegalAddressingMode(DL, AM, Ty, AS))
1495 return 0;
1496 return -1;
1497 }
1499 /// Return true if the specified immediate is a legal icmp immediate, that is,
1500 /// the target has icmp instructions which can compare a register against the
1501 /// immediate without having to materialize the immediate into a register.
1502 virtual bool isLegalICmpImmediate(int64_t) const {
1503 return true;
1504 }
1506 /// Return true if the specified immediate is a legal add immediate, that is,
1507 /// the target has add instructions which can add a register with the immediate
1508 /// without having to materialize the immediate into a register.
1509 virtual bool isLegalAddImmediate(int64_t) const {
1510 return true;
1511 }
1513 /// Return true if it's significantly cheaper to shift a vector by a uniform
1514 /// scalar than by an amount which will vary across each lane. On x86, for
1515 /// example, there is a "psllw" instruction for the former case, but no simple
1516 /// instruction for a general "a << b" operation on vectors.
1517 virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
1518 return false;
1519 }
1521 /// Return true if it's free to truncate a value of type FromTy to type
1522 /// ToTy. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
1523 /// by referencing its sub-register AX.
1524 /// Targets must return false when FromTy <= ToTy.
1525 virtual bool isTruncateFree(Type *FromTy, Type *ToTy) const {
1526 return false;
1527 }
1529 /// Return true if a truncation from FromTy to ToTy is permitted when deciding
1530 /// whether a call is in tail position. Typically this means that both results
1531 /// would be assigned to the same register or stack slot, but it could mean
1532 /// the target performs adequate checks of its own before proceeding with the
1533 /// tail call. Targets must return false when FromTy <= ToTy.
1534 virtual bool allowTruncateForTailCall(Type *FromTy, Type *ToTy) const {
1535 return false;
1536 }
1538 virtual bool isTruncateFree(EVT FromVT, EVT ToVT) const {
1539 return false;
1540 }
1542 virtual bool isProfitableToHoist(Instruction *I) const { return true; }
1544 /// Return true if the extension represented by \p I is free.
1545 /// Unlike the is[Z|FP]ExtFree family, which is based on types,
1546 /// this method can use the context provided by \p I to decide
1547 /// whether or not \p I is free.
1548 /// This method extends the behavior of the is[Z|FP]ExtFree family.
1549 /// In other words, if is[Z|FP]ExtFree returns true, then this method
1550 /// returns true as well. The converse is not true.
1551 /// The target can perform the adequate checks by overriding isExtFreeImpl.
1552 /// \pre \p I must be a sign, zero, or fp extension.
1553 bool isExtFree(const Instruction *I) const {
1554 switch (I->getOpcode()) {
1555 case Instruction::FPExt:
1556 if (isFPExtFree(EVT::getEVT(I->getType())))
1557 return true;
1558 break;
1559 case Instruction::ZExt:
1560 if (isZExtFree(I->getOperand(0)->getType(), I->getType()))
1561 return true;
1562 break;
1563 case Instruction::SExt:
1564 break;
1565 default:
1566 llvm_unreachable("Instruction is not an extension");
1567 }
1568 return isExtFreeImpl(I);
1569 }
1571 /// Return true if any actual instruction that defines a value of type FromTy
1572 /// implicitly zero-extends the value to ToTy in the result register.
1574 /// The function should return true when it is likely that the truncate can
1575 /// be freely folded with an instruction defining a value of FromTy. If
1576 /// the defining instruction is unknown (because you're looking at a
1577 /// function argument, PHI, etc.) then the target may require an
1578 /// explicit truncate, which is not necessarily free, but this function
1579 /// does not deal with those cases.
1580 /// Targets must return false when FromTy >= ToTy.
1581 virtual bool isZExtFree(Type *FromTy, Type *ToTy) const {
1582 return false;
1583 }
1585 virtual bool isZExtFree(EVT FromTy, EVT ToTy) const {
1586 return false;
1587 }
1589 /// Return true if the target can combine into a single paired load
1590 /// two values of type LoadedType loaded next to each other in memory.
1591 /// RequiredAlignment gives the minimal alignment constraints that must be met
1592 /// to be able to select this paired load.
1594 /// This information is *not* used to generate actual paired loads, but it is
1595 /// used to generate a sequence of loads that is easier to combine into a
1596 /// wide load.
1597 /// For instance, something like this:
1598 /// a = load i64* addr
1599 /// b = trunc i64 a to i32
1600 /// c = lshr i64 a, 32
1601 /// d = trunc i64 c to i32
1602 /// will be optimized into:
1603 /// b = load i32* addr1
1604 /// d = load i32* addr2
1605 /// Where addr1 = addr2 +/- sizeof(i32).
1607 /// In other words, unless the target performs a post-isel load combining,
1608 /// this information should not be provided because it will generate more
1609 /// loads.
1610 virtual bool hasPairedLoad(Type * /*LoadedType*/,
1611 unsigned & /*RequiredAlignment*/) const {
1612 return false;
1613 }
1615 virtual bool hasPairedLoad(EVT /*LoadedType*/,
1616 unsigned & /*RequiredAlignment*/) const {
1617 return false;
1618 }
1620 /// \brief Get the maximum supported factor for interleaved memory accesses.
1621 /// Default to be the minimum interleave factor: 2.
1622 virtual unsigned getMaxSupportedInterleaveFactor() const { return 2; }
1624 /// \brief Lower an interleaved load to target specific intrinsics. Return
1625 /// true on success.
1627 /// \p LI is the vector load instruction.
1628 /// \p Shuffles is the shufflevector list to DE-interleave the loaded vector.
1629 /// \p Indices is the corresponding indices for each shufflevector.
1630 /// \p Factor is the interleave factor.
1631 virtual bool lowerInterleavedLoad(LoadInst *LI,
1632 ArrayRef<ShuffleVectorInst *> Shuffles,
1633 ArrayRef<unsigned> Indices,
1634 unsigned Factor) const {
1635 return false;
1636 }
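// For illustration, with Factor == 2 the IR below de-interleaves one wide
// load; Shuffles would hold the two shufflevectors and Indices == {0, 1}:
//   %wide = load <8 x i32>, <8 x i32>* %ptr
//   %even = shufflevector <8 x i32> %wide, <8 x i32> undef,
//                         <4 x i32> <i32 0, i32 2, i32 4, i32 6>
//   %odd  = shufflevector <8 x i32> %wide, <8 x i32> undef,
//                         <4 x i32> <i32 1, i32 3, i32 5, i32 7>
// A target can match this pattern to a dedicated structured-load instruction.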
1638 /// \brief Lower an interleaved store to target specific intrinsics. Return
1639 /// true on success.
1641 /// \p SI is the vector store instruction.
1642 /// \p SVI is the shufflevector to RE-interleave the stored vector.
1643 /// \p Factor is the interleave factor.
1644 virtual bool lowerInterleavedStore(StoreInst *SI, ShuffleVectorInst *SVI,
1645 unsigned Factor) const {
1646 return false;
1647 }
1649 /// Return true if zero-extending the specific node Val to type VT2 is free
1650 /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
1651 /// because it's folded such as X86 zero-extending loads).
1652 virtual bool isZExtFree(SDValue Val, EVT VT2) const {
1653 return isZExtFree(Val.getValueType(), VT2);
1656 /// Return true if an fpext operation is free (for instance, because
1657 /// single-precision floating-point numbers are implicitly extended to
1658 /// double-precision).
1659 virtual bool isFPExtFree(EVT VT) const {
1660 assert(VT.isFloatingPoint());
1661 return false;
1662 }
1664 /// Return true if folding a vector load into ExtVal (a sign, zero, or any
1665 /// extend node) is profitable.
1666 virtual bool isVectorLoadExtDesirable(SDValue ExtVal) const { return false; }
1668 /// Return true if an fneg operation is free to the point where it is never
1669 /// worthwhile to replace it with a bitwise operation.
1670 virtual bool isFNegFree(EVT VT) const {
1671 assert(VT.isFloatingPoint());
1672 return false;
1673 }
1675 /// Return true if an fabs operation is free to the point where it is never
1676 /// worthwhile to replace it with a bitwise operation.
1677 virtual bool isFAbsFree(EVT VT) const {
1678 assert(VT.isFloatingPoint());
1679 return false;
1680 }
1682 /// Return true if an FMA operation is faster than a pair of fmul and fadd
1683 /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
1684 /// returns true, otherwise fmuladd is expanded to fmul + fadd.
1686 /// NOTE: This may be called before legalization on types for which FMAs are
1687 /// not legal, but should return true if those types will eventually legalize
1688 /// to types that support FMAs. After legalization, it will only be called on
1689 /// types that support FMAs (via Legal or Custom actions)
1690 virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
1691 return false;
1692 }
1694 /// Return true if it's profitable to narrow operations of type VT1 to
1695 /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
1696 /// i32 to i16.
1697 virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
1698 return false;
1699 }
1701 /// \brief Return true if it is beneficial to convert a load of a constant to
1702 /// just the constant itself.
1703 /// On some targets it might be more efficient to use a combination of
1704 /// arithmetic instructions to materialize the constant instead of loading it
1705 /// from a constant pool.
1706 virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
1707 Type *Ty) const {
1708 return false;
1709 }
1711 /// Return true if EXTRACT_SUBVECTOR is cheap for this result type
1712 /// with this index. This is needed because EXTRACT_SUBVECTOR usually
1713 /// has custom lowering that depends on the index of the first element,
1714 /// and only the target knows which lowering is cheap.
1715 virtual bool isExtractSubvectorCheap(EVT ResVT, unsigned Index) const {
1716 return false;
1717 }
1719 /// Return true if it is profitable to use a scalar input to a BUILD_VECTOR
1720 /// even if the vector itself has multiple uses.
1721 virtual bool aggressivelyPreferBuildVectorSources(EVT VecVT) const {
1722 return false;
1723 }
1725 //===--------------------------------------------------------------------===//
1726 // Runtime Library hooks
1729 /// Rename the default libcall routine name for the specified libcall.
1730 void setLibcallName(RTLIB::Libcall Call, const char *Name) {
1731 LibcallRoutineNames[Call] = Name;
1734 /// Get the libcall routine name for the specified libcall.
1735 const char *getLibcallName(RTLIB::Libcall Call) const {
1736 return LibcallRoutineNames[Call];
1739 /// Override the default CondCode to be used to test the result of the
1740 /// comparison libcall against zero.
1741 void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
1742 CmpLibcallCCs[Call] = CC;
1745 /// Get the CondCode that's to be used to test the result of the comparison
1746 /// libcall against zero.
1747 ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
1748 return CmpLibcallCCs[Call];
1751 /// Set the CallingConv that should be used for the specified libcall.
1752 void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
1753 LibcallCallingConvs[Call] = CC;
1756 /// Get the CallingConv that should be used for the specified libcall.
1757 CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
1758 return LibcallCallingConvs[Call];
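// For illustration, an ARM/AEABI-style backend might configure its runtime
// calls along these lines (a sketch):
//   setLibcallName(RTLIB::SDIV_I32, "__aeabi_idiv");
//   setLibcallCallingConv(RTLIB::SDIV_I32, CallingConv::ARM_AAPCS);
//   setCmpLibcallCC(RTLIB::OEQ_F64, ISD::SETEQ);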
1762 const TargetMachine &TM;
1764 /// Tells the code generator not to expand operations into sequences that use
1765 /// the select operations if possible.
1766 bool SelectIsExpensive;
1768 /// Tells the code generator that the target has multiple (allocatable)
1769 /// condition registers that can be used to store the results of comparisons
1770 /// for use by selects and conditional branches. With multiple condition
1771 /// registers, the code generator will not aggressively sink comparisons into
1772 /// the blocks of their users.
1773 bool HasMultipleConditionRegisters;
1775 /// Tells the code generator that the target has BitExtract instructions.
1776 /// The code generator will aggressively sink "shift"s into the blocks of
1777 /// their users if the users will generate "and" instructions which can be
1778 /// combined with "shift" to BitExtract instructions.
1779 bool HasExtractBitsInsn;
1781 // Don't expand fsqrt with an approximation based on the inverse sqrt.
1782 bool FsqrtIsCheap;
1784 /// Tells the code generator to bypass slow divide or remainder
1785 /// instructions. For example, BypassSlowDivWidths[32] = 8 tells the code
1786 /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
1787 /// div/rem when the operands are positive and less than 256.
1788 DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;
1790 /// Tells the code generator that it shouldn't generate extra flow control
1791 /// instructions and should attempt to combine flow control instructions via
1792 /// predication.
1793 bool JumpIsExpensive;
1795 /// Whether the target supports or cares about preserving floating point
1796 /// exception behavior.
1797 bool HasFloatingPointExceptions;
1799 /// This target prefers to use _setjmp to implement llvm.setjmp.
1801 /// Defaults to false.
1802 bool UseUnderscoreSetJmp;
1804 /// This target prefers to use _longjmp to implement llvm.longjmp.
1806 /// Defaults to false.
1807 bool UseUnderscoreLongJmp;
1809 /// Minimum number of case blocks required to lower a switch to a jump table.
1810 int MinimumJumpTableEntries;
1812 /// Information about the contents of the high-bits in boolean values held in
1813 /// a type wider than i1. See getBooleanContents.
1814 BooleanContent BooleanContents;
1816 /// Information about the contents of the high-bits in boolean values held in
1817 /// a type wider than i1. See getBooleanContents.
1818 BooleanContent BooleanFloatContents;
1820 /// Information about the contents of the high-bits in boolean vector values
1821 /// when the element type is wider than i1. See getBooleanContents.
1822 BooleanContent BooleanVectorContents;
1824 /// The target scheduling preference: shortest possible total cycles or lowest
1825 /// register usage.
1826 Sched::Preference SchedPreferenceInfo;
1828 /// The size, in bytes, of the target's jmp_buf buffers
1829 unsigned JumpBufSize;
1831 /// The alignment, in bytes, of the target's jmp_buf buffers
1832 unsigned JumpBufAlignment;
1834 /// The minimum alignment that any argument on the stack needs to have.
1835 unsigned MinStackArgumentAlignment;
1837 /// The minimum function alignment (used when optimizing for size, and to
1838 /// prevent explicitly provided alignment from leading to incorrect code).
1839 unsigned MinFunctionAlignment;
1841 /// The preferred function alignment (used when alignment unspecified and
1842 /// optimizing for speed).
1843 unsigned PrefFunctionAlignment;
1845 /// The preferred loop alignment.
1846 unsigned PrefLoopAlignment;
1848 /// Whether the DAG builder should automatically insert fences and reduce
1849 /// ordering for atomics. (This will be set for most architectures with
1850 /// weak memory ordering.)
1851 bool InsertFencesForAtomic;
1853 /// If set to a physical register, this specifies the register that
1854 /// llvm.stacksave/llvm.stackrestore should save and restore.
1855 unsigned StackPointerRegisterToSaveRestore;
1857 /// This indicates the default register class to use for each ValueType the
1858 /// target supports natively.
1859 const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
1860 unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
1861 MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];
1863 /// This indicates the "representative" register class to use for each
1864 /// ValueType the target supports natively. This information is used by the
1865 /// scheduler to track register pressure. By default, the representative
1866 /// register class is the largest legal super-reg register class of the
1867 /// register class of the specified type. e.g. On x86, i8, i16, and i32's
1868 /// representative class would be GR32.
1869 const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];
1871 /// This indicates the "cost" of the "representative" register class for each
1872 /// ValueType. The cost is used by the scheduler to approximate register
1873 /// pressure.
1874 uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1876 /// For any value types we are promoting or expanding, this contains the value
1877 /// type that we are changing to. For Expanded types, this contains one step
1878 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
1879 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
1880 /// the same type (e.g. i32 -> i32).
1881 MVT TransformToType[MVT::LAST_VALUETYPE];
1883 /// For each operation and each value type, keep a LegalizeAction that
1884 /// indicates how instruction selection should deal with the operation. Most
1885 /// operations are Legal (aka, supported natively by the target), but
1886 /// operations that are not should be described. Note that operations on
1887 /// non-legal value types are not described here.
1888 LegalizeAction OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1890 /// For each load extension type and each value type, keep a LegalizeAction
1891 /// that indicates how instruction selection should deal with a load of a
1892 /// specific value type and extension type.
1893 LegalizeAction LoadExtActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE]
1894 [ISD::LAST_LOADEXT_TYPE];
1896 /// For each value type pair keep a LegalizeAction that indicates whether a
1897 /// truncating store of a specific value type and truncating type is legal.
1898 LegalizeAction TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];
1900 /// For each indexed mode and each value type, keep a pair of LegalizeAction
1901 /// that indicates how instruction selection should deal with the load /
1902 /// store.
1904 /// The first dimension is the value_type for the reference. The second
1905 /// dimension represents the various modes for load store.
1906 uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];
1908 /// For each condition code (ISD::CondCode) keep a LegalizeAction that
1909 /// indicates how instruction selection should deal with the condition code.
1911 /// Because each CC action takes up 4 bits, we need to have the array size be
1912 /// large enough to fit all of the value types. This can be done by rounding
1913 /// up the MVT::LAST_VALUETYPE value to the next multiple of 8.
1914 uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 7) / 8];
1917 ValueTypeActionImpl ValueTypeActions;
1920 LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const;
1923 std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;
1925 /// Targets can specify ISD nodes that they would like PerformDAGCombine
1926 /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
1927 /// array.
1928 unsigned char
1929 TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];
1931 /// For operations that must be promoted to a specific type, this holds the
1932 /// destination type. This map should be sparse, so don't hold it as an
1933 /// array.
1935 /// Targets add entries to this map with AddPromotedToType(..), clients access
1936 /// this with getTypeToPromoteTo(..).
1937 std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
1938 PromoteToType;
1940 /// Stores the name of each libcall.
1941 const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];
1943 /// The ISD::CondCode that should be used to test the result of each of the
1944 /// comparison libcall against zero.
1945 ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];
1947 /// Stores the CallingConv that should be used for each libcall.
1948 CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
1951 /// Return true if the extension represented by \p I is free.
1952 /// \pre \p I is a sign, zero, or fp extension and
1953 /// is[Z|FP]ExtFree of the related types is not true.
1954 virtual bool isExtFreeImpl(const Instruction *I) const { return false; }
1956 /// Depth that GatherAllAliases should continue looking for chain
1957 /// dependencies when trying to find a more preferable chain. As an
1958 /// approximation, this should be more than the number of consecutive stores
1959 /// expected to be merged.
1960 unsigned GatherAllAliasesMaxDepth;
1962 /// \brief Specify maximum number of store instructions per memset call.
1964 /// When lowering \@llvm.memset this field specifies the maximum number of
1965 /// store operations that may be substituted for the call to memset. Targets
1966 /// must set this value based on the cost threshold for that target. Targets
1967 /// should assume that the memset will be done using as many of the largest
1968 /// store operations first, followed by smaller ones, if necessary, per
1969 /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
1970 /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
1971 /// store. This only applies to setting a constant array of a constant size.
1972 unsigned MaxStoresPerMemset;
1974 /// Maximum number of stores operations that may be substituted for the call
1975 /// to memset, used for functions with OptSize attribute.
1976 unsigned MaxStoresPerMemsetOptSize;
1978 /// \brief Specify maximum number of store instructions per memcpy call.
1980 /// When lowering \@llvm.memcpy this field specifies the maximum number of
1981 /// store operations that may be substituted for a call to memcpy. Targets
1982 /// must set this value based on the cost threshold for that target. Targets
1983 /// should assume that the memcpy will be done using as many of the largest
1984 /// store operations first, followed by smaller ones, if necessary, per
1985 /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
1986 /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
1987 /// and one 1-byte store. This only applies to copying a constant array of
1988 /// constant size.
1989 unsigned MaxStoresPerMemcpy;
1991 /// Maximum number of store operations that may be substituted for a call to
1992 /// memcpy, used for functions with OptSize attribute.
1993 unsigned MaxStoresPerMemcpyOptSize;
1995 /// \brief Specify maximum number of store instructions per memmove call.
1997 /// When lowering \@llvm.memmove this field specifies the maximum number of
1998 /// store instructions that may be substituted for a call to memmove. Targets
1999 /// must set this value based on the cost threshold for that target. Targets
2000 /// should assume that the memmove will be done using as many of the largest
2001 /// store operations first, followed by smaller ones, if necessary, per
2002 /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
2003 /// with 8-bit alignment would result in nine 1-byte stores. This only
2004 /// applies to copying a constant array of constant size.
2005 unsigned MaxStoresPerMemmove;
2007 /// Maximum number of store instructions that may be substituted for a call to
2008 /// memmove, used for functions with OptSize attribute.
2009 unsigned MaxStoresPerMemmoveOptSize;
2011 /// Tells the code generator that select is more expensive than a branch if
2012 /// the branch is usually predicted right.
2013 bool PredictableSelectIsExpensive;
2015 /// MaskAndBranchFoldingIsLegal - Indicates if the target supports folding
2016 /// a mask of a single bit, a compare, and a branch into a single instruction.
2017 bool MaskAndBranchFoldingIsLegal;
2019 /// \see enableExtLdPromotion.
2020 bool EnableExtLdPromotion;
2023 /// Return true if the value types that can be represented by the specified
2024 /// register class are all legal.
2025 bool isLegalRC(const TargetRegisterClass *RC) const;
2027 /// Replace/modify any TargetFrameIndex operands with a target-dependent
2028 /// sequence of memory operands that is recognized by PrologEpilogInserter.
2029 MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
2030 MachineBasicBlock *MBB) const;
2033 /// This class defines information used to lower LLVM code to legal SelectionDAG
2034 /// operators that the target instruction selector can accept natively.
2036 /// This class also defines callbacks that targets must implement to lower
2037 /// target-specific constructs to SelectionDAG operators.
2038 class TargetLowering : public TargetLoweringBase {
2039 TargetLowering(const TargetLowering&) = delete;
2040 void operator=(const TargetLowering&) = delete;
2043 /// NOTE: The TargetMachine owns TLOF.
2044 explicit TargetLowering(const TargetMachine &TM);
2046 /// Returns true (setting the base pointer, offset pointer, and addressing
2047 /// mode by reference) if the node's address can be legally represented as a
2048 /// pre-indexed load / store address.
2049 virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
2050 SDValue &/*Offset*/,
2051 ISD::MemIndexedMode &/*AM*/,
2052 SelectionDAG &/*DAG*/) const {
2053 return false;
2054 }
2056 /// Returns true (setting the base pointer, offset pointer, and addressing
2057 /// mode by reference) if this node can be combined with a load / store to
2058 /// form a post-indexed load / store.
2059 virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
2060 SDValue &/*Base*/,
2061 SDValue &/*Offset*/,
2062 ISD::MemIndexedMode &/*AM*/,
2063 SelectionDAG &/*DAG*/) const {
2064 return false;
2065 }
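// For illustration: on an ARM-like target, "ldr r0, [r1, #4]!" is a
// pre-indexed load (the base register is updated before the access) and
// "ldr r0, [r1], #4" is post-indexed (the base is updated afterwards).
// Implementing these hooks lets the DAG combiner fold the address arithmetic
// into such instructions.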
2067 /// Return the entry encoding for a jump table in the current function. The
2068 /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
2069 virtual unsigned getJumpTableEncoding() const;
2071 virtual const MCExpr *
2072 LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
2073 const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
2074 MCContext &/*Ctx*/) const {
2075 llvm_unreachable("Need to implement this hook if target has custom JTIs");
2078 /// Returns relocation base for the given PIC jumptable.
2079 virtual SDValue getPICJumpTableRelocBase(SDValue Table,
2080 SelectionDAG &DAG) const;
2082 /// This returns the relocation base for the given PIC jumptable, the same as
2083 /// getPICJumpTableRelocBase, but as an MCExpr.
2084 virtual const MCExpr *
2085 getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
2086 unsigned JTI, MCContext &Ctx) const;
2088 /// Return true if folding a constant offset with the given GlobalAddress is
2089 /// legal. It is frequently not legal in PIC relocation models.
2090 virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;
2092 bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
2093 SDValue &Chain) const;
2095 void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
2096 SDValue &NewLHS, SDValue &NewRHS,
2097 ISD::CondCode &CCCode, SDLoc DL) const;
2099 /// Returns a pair of (return value, chain).
2100 /// It is an error to pass RTLIB::UNKNOWN_LIBCALL as \p LC.
2101 std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
2102 EVT RetVT, ArrayRef<SDValue> Ops,
2103 bool isSigned, SDLoc dl,
2104 bool doesNotReturn = false,
2105 bool isReturnValueUsed = true) const;
2107 //===--------------------------------------------------------------------===//
2108 // TargetLowering Optimization Methods
2111 /// A convenience struct that encapsulates a DAG, and two SDValues for
2112 /// returning information from TargetLowering to its clients that want to
2113 /// combine.
2114 struct TargetLoweringOpt {
2115 SelectionDAG &DAG;
2116 bool LegalTys;
2117 bool LegalOps;
2118 SDValue Old;
2119 SDValue New;
2121 explicit TargetLoweringOpt(SelectionDAG &InDAG,
2122 bool LT, bool LO) :
2123 DAG(InDAG), LegalTys(LT), LegalOps(LO) {}
2125 bool LegalTypes() const { return LegalTys; }
2126 bool LegalOperations() const { return LegalOps; }
2128 bool CombineTo(SDValue O, SDValue N) {
2129 Old = O;
2130 New = N;
2131 return true;
2132 }
2134 /// Check to see if the specified operand of the specified instruction is a
2135 /// constant integer. If so, check to see if there are any bits set in the
2136 /// constant that are not demanded. If so, shrink the constant and return
2137 /// true.
2138 bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);
2140 /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
2141 /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
2142 /// generalized for targets with other types of implicit widening casts.
2143 bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
2144 TargetLoweringOpt &TLO) const;
2147 /// Look at Op. At this point, we know that only the DemandedMask bits of the
2148 /// result of Op are ever used downstream. If we can use this information to
2149 /// simplify Op, create a new simplified DAG node and return true, returning
2150 /// the original and new nodes in Old and New. Otherwise, analyze the
2151 /// expression and return a mask of KnownOne and KnownZero bits for the
2152 /// expression (used to simplify the caller). The KnownZero/One bits may only
2153 /// be accurate for those bits in the DemandedMask.
2154 bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
2155 APInt &KnownZero, APInt &KnownOne,
2156 TargetLoweringOpt &TLO, unsigned Depth = 0) const;
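// Typical usage from a target combine (a sketch, assuming Op and DemandedMask
// are in scope):
//   APInt KnownZero, KnownOne;
//   TargetLoweringOpt TLO(DCI.DAG, !DCI.isBeforeLegalize(),
//                         !DCI.isBeforeLegalizeOps());
//   if (SimplifyDemandedBits(Op, DemandedMask, KnownZero, KnownOne, TLO))
//     DCI.CommitTargetLoweringOpt(TLO);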
2158 /// Determine which of the bits specified in Mask are known to be either zero
2159 /// or one and return them in the KnownZero/KnownOne bitsets.
2160 virtual void computeKnownBitsForTargetNode(const SDValue Op,
2161 APInt &KnownZero,
2162 APInt &KnownOne,
2163 const SelectionDAG &DAG,
2164 unsigned Depth = 0) const;
2166 /// This method can be implemented by targets that want to expose additional
2167 /// information about sign bits to the DAG Combiner.
2168 virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
2169 const SelectionDAG &DAG,
2170 unsigned Depth = 0) const;
2172 struct DAGCombinerInfo {
2173 void *DC; // The DAG Combiner object.
2174 CombineLevel Level;
2175 bool CalledByLegalizer;
2176 public:
2177 SelectionDAG &DAG;
2179 DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
2180 : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}
2182 bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
2183 bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
2184 bool isAfterLegalizeVectorOps() const {
2185 return Level == AfterLegalizeDAG;
2187 CombineLevel getDAGCombineLevel() { return Level; }
2188 bool isCalledByLegalizer() const { return CalledByLegalizer; }
2190 void AddToWorklist(SDNode *N);
2191 void RemoveFromWorklist(SDNode *N);
2192 SDValue CombineTo(SDNode *N, ArrayRef<SDValue> To, bool AddTo = true);
2193 SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
2194 SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);
2196 void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
2199 /// Return true if N is a constant or constant vector equal to the true value
2200 /// from getBooleanContents().
2201 bool isConstTrueVal(const SDNode *N) const;
2203 /// Return true if N is a constant or constant vector equal to the false value
2204 /// from getBooleanContents().
2205 bool isConstFalseVal(const SDNode *N) const;
2207 /// Try to simplify a setcc built with the specified operands and cc. If it is
2208 /// unable to simplify it, return a null SDValue.
2209 SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
2210 ISD::CondCode Cond, bool foldBooleans,
2211 DAGCombinerInfo &DCI, SDLoc dl) const;
2213 /// Returns true (and the GlobalValue and the offset) if the node is a
2214 /// GlobalAddress + offset.
2215 virtual bool
2216 isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
2218 /// This method will be invoked for all target nodes and for any
2219 /// target-independent nodes that the target has registered (via
2220 /// setTargetDAGCombine) to invoke it for.
2222 /// The semantics are as follows:
2224 /// SDValue.Val == 0 - No change was made
2225 /// SDValue.Val == N - N was replaced, is dead, and is already handled.
2226 /// otherwise - N should be replaced by the returned Operand.
2228 /// In addition, methods provided by DAGCombinerInfo may be used to perform
2229 /// more complex transformations.
2231 virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
2233 /// Return true if it is profitable to move a following shift through this
2234 /// node, adjusting any immediate operands as necessary to preserve semantics.
2235 /// This transformation may not be desirable if it disrupts a particularly
2236 /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
2237 /// By default, it returns true.
2238 virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
2239 return true;
2240 }
2242 /// Return true if the target has native support for the specified value type
2243 /// and it is 'desirable' to use the type for the given node type. e.g. On x86
2244 /// i16 is legal, but undesirable since i16 instruction encodings are longer
2245 /// and some i16 instructions are slow.
2246 virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
2247 // By default, assume all legal types are desirable.
2248 return isTypeLegal(VT);
2251 /// Return true if it is profitable for dag combiner to transform a floating
2252 /// point op of the specified opcode to an equivalent op of an integer
2253 /// type. e.g. f32 load -> i32 load can be profitable on ARM.
2254 virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
2255 EVT /*VT*/) const {
2256 return false;
2257 }
2259 /// This method queries the target whether it is beneficial for dag combiner to
2260 /// promote the specified node. If true, it should return the desired
2261 /// promotion type by reference.
2262 virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
2263 return false;
2264 }
2266 /// Return true if the target supports that a subset of CSRs for the given
2267 /// machine function is handled explicitly via copies.
2268 virtual bool supportSplitCSR(MachineFunction *MF) const {
2269 return false;
2270 }
2272 /// Return true if the MachineFunction contains a COPY which would imply
2273 /// HasOpaqueSPAdjustment.
2274 virtual bool hasCopyImplyingStackAdjustment(MachineFunction *MF) const {
2275 return false;
2276 }
2278 /// Perform necessary initialization to handle a subset of CSRs explicitly
2279 /// via copies. This function is called at the beginning of instruction
2280 /// selection.
2281 virtual void initializeSplitCSR(MachineBasicBlock *Entry) const {
2282 llvm_unreachable("Not Implemented");
2285 /// Insert explicit copies in entry and exit blocks. We copy a subset of
2286 /// CSRs to virtual registers in the entry block, and copy them back to
2287 /// physical registers in the exit blocks. This function is called at the end
2288 /// of instruction selection.
2289 virtual void insertCopiesSplitCSR(
2290 MachineBasicBlock *Entry,
2291 const SmallVectorImpl<MachineBasicBlock *> &Exits) const {
2292 llvm_unreachable("Not Implemented");
2295 //===--------------------------------------------------------------------===//
2296 // Lowering methods - These methods must be implemented by targets so that
2297 // the SelectionDAGBuilder code knows how to lower these.
2300 /// This hook must be implemented to lower the incoming (formal) arguments,
2301 /// described by the Ins array, into the specified DAG. The implementation
2302 /// should fill in the InVals array with legal-type argument values, and
2303 /// return the resulting token chain value.
2305 virtual SDValue
2306 LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2307 bool /*isVarArg*/,
2308 const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
2309 SDLoc /*dl*/, SelectionDAG &/*DAG*/,
2310 SmallVectorImpl<SDValue> &/*InVals*/) const {
2311 llvm_unreachable("Not Implemented");
2314 struct ArgListEntry {
2315 Value *Val;
2316 Type *Ty;
2317 bool isSExt : 1;
2318 bool isZExt : 1;
2319 bool isInReg : 1;
2320 bool isSRet : 1;
2321 bool isNest : 1;
2322 bool isByVal : 1;
2323 bool isInAlloca : 1;
2324 bool isReturned : 1;
2325 uint16_t Alignment;
2327 ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
2328 isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
2329 isReturned(false), Alignment(0) { }
2331 void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
2333 typedef std::vector<ArgListEntry> ArgListTy;
2335 /// This structure contains all information that is necessary for lowering
2336 /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
2337 /// needs to lower a call, and targets will see this struct in their LowerCall
2338 /// implementation.
2339 struct CallLoweringInfo {
2340 SDValue Chain;
2341 Type *RetTy;
2342 bool RetSExt : 1;
2343 bool RetZExt : 1;
2344 bool IsVarArg : 1;
2345 bool IsInReg : 1;
2346 bool DoesNotReturn : 1;
2347 bool IsReturnValueUsed : 1;
2349 // IsTailCall should be modified by implementations of
2350 // TargetLowering::LowerCall that perform tail call conversions.
2351 bool IsTailCall;
2353 unsigned NumFixedArgs;
2354 CallingConv::ID CallConv;
2355 SDValue Callee;
2356 ArgListTy Args;
2357 SelectionDAG &DAG;
2358 SDLoc DL;
2359 ImmutableCallSite *CS;
2360 bool IsPatchPoint;
2361 SmallVector<ISD::OutputArg, 32> Outs;
2362 SmallVector<SDValue, 32> OutVals;
2363 SmallVector<ISD::InputArg, 32> Ins;
2365 CallLoweringInfo(SelectionDAG &DAG)
2366 : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
2367 IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
2368 IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
2369 DAG(DAG), CS(nullptr), IsPatchPoint(false) {}
2371 CallLoweringInfo &setDebugLoc(SDLoc dl) {
2372 DL = dl;
2373 return *this;
2374 }
2376 CallLoweringInfo &setChain(SDValue InChain) {
2377 Chain = InChain;
2378 return *this;
2379 }
2381 CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
2382 SDValue Target, ArgListTy &&ArgsList,
2383 unsigned FixedArgs = -1) {
2384 RetTy = ResultType;
2385 Callee = Target;
2386 CallConv = CC;
2387 NumFixedArgs =
2388 (FixedArgs == static_cast<unsigned>(-1) ? ArgsList.size() : FixedArgs);
2389 Args = std::move(ArgsList);
2390 return *this;
2391 }
2393 CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
2394 SDValue Target, ArgListTy &&ArgsList,
2395 ImmutableCallSite &Call) {
2396 RetTy = ResultType;
2398 IsInReg = Call.paramHasAttr(0, Attribute::InReg);
2399 DoesNotReturn = Call.doesNotReturn();
2400 IsVarArg = FTy->isVarArg();
2401 IsReturnValueUsed = !Call.getInstruction()->use_empty();
2402 RetSExt = Call.paramHasAttr(0, Attribute::SExt);
2403 RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
2405 Callee = Target;
2407 CallConv = Call.getCallingConv();
2408 NumFixedArgs = FTy->getNumParams();
2409 Args = std::move(ArgsList);
2411 CS = &Call;
2413 return *this;
2414 }
2416 CallLoweringInfo &setInRegister(bool Value = true) {
2417 IsInReg = Value;
2418 return *this;
2419 }
2421 CallLoweringInfo &setNoReturn(bool Value = true) {
2422 DoesNotReturn = Value;
2423 return *this;
2424 }
2426 CallLoweringInfo &setVarArg(bool Value = true) {
2427 IsVarArg = Value;
2428 return *this;
2429 }
2431 CallLoweringInfo &setTailCall(bool Value = true) {
2432 IsTailCall = Value;
2433 return *this;
2434 }
2436 CallLoweringInfo &setDiscardResult(bool Value = true) {
2437 IsReturnValueUsed = !Value;
2438 return *this;
2439 }
2441 CallLoweringInfo &setSExtResult(bool Value = true) {
2442 RetSExt = Value;
2443 return *this;
2444 }
2446 CallLoweringInfo &setZExtResult(bool Value = true) {
2447 RetZExt = Value;
2448 return *this;
2449 }
2451 CallLoweringInfo &setIsPatchPoint(bool Value = true) {
2452 IsPatchPoint = Value;
2453 return *this;
2454 }
2456 ArgListTy &getArgs() {
2457 return Args;
2458 }
2462 /// This function lowers an abstract call to a function into an actual call.
2463 /// This returns a pair of operands. The first element is the return value
2464 /// for the function (if RetTy is not VoidTy). The second element is the
2465 /// outgoing token chain. It calls LowerCall to do the actual lowering.
2466 std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;
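// For illustration, emitting a call to an external helper might look like
// this (a sketch; Chain, Callee, RetTy, Args and dl are assumed to exist):
//   TargetLowering::CallLoweringInfo CLI(DAG);
//   CLI.setDebugLoc(dl)
//      .setChain(Chain)
//      .setCallee(CallingConv::C, RetTy, Callee, std::move(Args));
//   std::pair<SDValue, SDValue> Result = LowerCallTo(CLI);
//   // Result.first is the return value, Result.second the new token chain.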
2468 /// This hook must be implemented to lower calls into the specified
2469 /// DAG. The outgoing arguments to the call are described by the Outs array,
2470 /// and the values to be returned by the call are described by the Ins
2471 /// array. The implementation should fill in the InVals array with legal-type
2472 /// return values from the call, and return the resulting token chain value.
2474 LowerCall(CallLoweringInfo &/*CLI*/,
2475 SmallVectorImpl<SDValue> &/*InVals*/) const {
2476 llvm_unreachable("Not Implemented");
2479 /// Target-specific cleanup for formal ByVal parameters.
2480 virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}
2482 /// This hook should be implemented to check whether the return values
2483 /// described by the Outs array can fit into the return registers. If false
2484 /// is returned, an sret-demotion is performed.
2485 virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
2486 MachineFunction &/*MF*/, bool /*isVarArg*/,
2487 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2488 LLVMContext &/*Context*/) const
2489 {
2490 // Return true by default to get preexisting behavior.
2491 return true;
2492 }
2494 /// This hook must be implemented to lower outgoing return values, described
2495 /// by the Outs array, into the specified DAG. The implementation should
2496 /// return the resulting token chain value.
2497 virtual SDValue
2498 LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
2499 bool /*isVarArg*/,
2500 const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
2501 const SmallVectorImpl<SDValue> &/*OutVals*/,
2502 SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
2503 llvm_unreachable("Not Implemented");
2506 /// Return true if result of the specified node is used by a return node
2507 /// only. It also computes and returns the input chain for the tail call.
2509 /// This is used to determine whether it is possible to codegen a libcall as
2510 /// tail call at legalization time.
2511 virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
2512 return false;
2513 }
2515 /// Return true if the target may be able to emit the call instruction as a
2516 /// tail call. This is used by optimization passes to determine if it's
2517 /// profitable to duplicate return instructions to enable tailcall optimization.
2518 virtual bool mayBeEmittedAsTailCall(CallInst *) const {
2519 return false;
2520 }
2522 /// Return the builtin name for the __builtin___clear_cache intrinsic.
2523 /// Default is to invoke the clear cache library call.
2524 virtual const char * getClearCacheBuiltinName() const {
2525 return "__clear_cache";
2528 /// Return the register ID of the name passed in. Used by named register
2529 /// global variables extension. There is no target-independent behaviour
2530 /// so the default action is to bail.
2531 virtual unsigned getRegisterByName(const char* RegName, EVT VT,
2532 SelectionDAG &DAG) const {
2533 report_fatal_error("Named registers not implemented for this target");
2536 /// Return the type that should be used to zero or sign extend a
2537 /// zeroext/signext integer argument or return value. FIXME: Most C calling
2538 /// conventions require the return type to be promoted, but this is not true
2539 /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
2540 /// calling conventions. The frontend should handle this and include all of
2541 /// the necessary information.
2542 virtual EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
2543 ISD::NodeType /*ExtendKind*/) const {
2544 EVT MinVT = getRegisterType(Context, MVT::i32);
2545 return VT.bitsLT(MinVT) ? MinVT : VT;
2548 /// For some targets, an LLVM struct type must be broken down into multiple
2549 /// simple types, but the calling convention specifies that the entire struct
2550 /// must be passed in a block of consecutive registers.
2551 virtual bool
2552 functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
2553 bool isVarArg) const {
2554 return false;
2555 }
2557 /// Returns a 0 terminated array of registers that can be safely used as
2558 /// scratch registers.
2559 virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
2560 return nullptr;
2561 }
2563 /// This callback is used to prepare for a volatile or atomic load.
2564 /// It takes a chain node as input and returns the chain for the load itself.
2566 /// Having a callback like this is necessary for targets like SystemZ,
2567 /// which allows a CPU to reuse the result of a previous load indefinitely,
2568 /// even if a cache-coherent store is performed by another CPU. The default
2569 /// implementation does nothing.
2570 virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
2571 SelectionDAG &DAG) const {
2572 return Chain;
2573 }
2575 /// This callback is invoked by the type legalizer to legalize nodes with an
2576 /// illegal operand type but legal result types. It replaces the
2577 /// LowerOperation callback in the type Legalizer. The reason we can not do
2578 /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
2579 /// use this callback.
2581 /// TODO: Consider merging with ReplaceNodeResults.
2583 /// The target places new result values for the node in Results (their number
2584 /// and types must exactly match those of the original return values of
2585 /// the node), or leaves Results empty, which indicates that the node is not
2586 /// to be custom lowered after all.
2587 /// The default implementation calls LowerOperation.
2588 virtual void LowerOperationWrapper(SDNode *N,
2589 SmallVectorImpl<SDValue> &Results,
2590 SelectionDAG &DAG) const;
2592 /// This callback is invoked for operations that are unsupported by the
2593 /// target, which are registered to use 'custom' lowering, and whose defined
2594 /// values are all legal. If the target has no operations that require custom
2595 /// lowering, it need not implement this. The default implementation of this
2596 /// aborts.
2597 virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
2599 /// This callback is invoked when a node result type is illegal for the
2600 /// target, and the operation was registered to use 'custom' lowering for that
2601 /// result type. The target places new result values for the node in Results
2602 /// (their number and types must exactly match those of the original return
2603 /// values of the node), or leaves Results empty, which indicates that the
2604 /// node is not to be custom lowered after all.
2606 /// If the target has no operations that require custom lowering, it need not
2607 /// implement this. The default implementation aborts.
2608 virtual void ReplaceNodeResults(SDNode * /*N*/,
2609 SmallVectorImpl<SDValue> &/*Results*/,
2610 SelectionDAG &/*DAG*/) const {
2611 llvm_unreachable("ReplaceNodeResults not implemented for this target!");
2614 /// This method returns the name of a target specific DAG node.
2615 virtual const char *getTargetNodeName(unsigned Opcode) const;
2617 /// This method returns a target specific FastISel object, or null if the
2618 /// target does not support "fast" ISel.
2619 virtual FastISel *createFastISel(FunctionLoweringInfo &,
2620 const TargetLibraryInfo *) const {
2621 return nullptr;
2622 }
2625 bool verifyReturnAddressArgumentIsConstant(SDValue Op,
2626 SelectionDAG &DAG) const;
2628 //===--------------------------------------------------------------------===//
2629 // Inline Asm Support hooks
2632 /// This hook allows the target to expand an inline asm call to be explicit
2633 /// llvm code if it wants to. This is useful for turning simple inline asms
2634 /// into LLVM intrinsics, which gives the compiler more information about the
2635 /// behavior of the code.
2636 virtual bool ExpandInlineAsm(CallInst *) const {
2637 return false;
2638 }
2640 enum ConstraintType {
2641 C_Register, // Constraint represents specific register(s).
2642 C_RegisterClass, // Constraint represents any of register(s) in class.
2643 C_Memory, // Memory constraint.
2644 C_Other, // Something else.
2645 C_Unknown // Unsupported constraint.
2648 enum ConstraintWeight {
2649 // Generic weights.
2650 CW_Invalid = -1, // No match.
2651 CW_Okay = 0, // Acceptable.
2652 CW_Good = 1, // Good weight.
2653 CW_Better = 2, // Better weight.
2654 CW_Best = 3, // Best weight.
2656 // Well-known weights.
2657 CW_SpecificReg = CW_Okay, // Specific register operands.
2658 CW_Register = CW_Good, // Register operands.
2659 CW_Memory = CW_Better, // Memory operands.
2660 CW_Constant = CW_Best, // Constant operand.
2661 CW_Default = CW_Okay // Default or don't know type.
2664 /// This contains information for each constraint that we are lowering.
2665 struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
2666 /// This contains the actual string for the code, like "m". TargetLowering
2667 /// picks the 'best' code from ConstraintInfo::Codes that most closely
2668 /// matches the operand.
2669 std::string ConstraintCode;
2671 /// Information about the constraint code, e.g. Register, RegisterClass,
2672 /// Memory, Other, Unknown.
2673 TargetLowering::ConstraintType ConstraintType;
2675 /// If this is the result output operand or a clobber, this is null,
2676 /// otherwise it is the incoming operand to the CallInst. This gets
2677 /// modified as the asm is processed.
2678 Value *CallOperandVal;
2680 /// The ValueType for the operand value.
2681 MVT ConstraintVT;
2683 /// Return true if this is an input operand that is a matching constraint
2684 /// like "4".
2685 bool isMatchingInputConstraint() const;
2687 /// If this is an input matching constraint, this method returns the output
2688 /// operand it matches.
2689 unsigned getMatchedOperand() const;
2691 /// Copy constructor for copying from a ConstraintInfo.
2692 AsmOperandInfo(InlineAsm::ConstraintInfo Info)
2693 : InlineAsm::ConstraintInfo(std::move(Info)),
2694 ConstraintType(TargetLowering::C_Unknown), CallOperandVal(nullptr),
2695 ConstraintVT(MVT::Other) {}
2698 typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;
2700 /// Split up the constraint string from the inline assembly value into the
2701 /// specific constraints and their prefixes, and also tie in the associated
2702 /// operand values. If this returns an empty vector, and if the constraint
2703 /// string itself isn't empty, there was an error parsing.
2704 virtual AsmOperandInfoVector ParseConstraints(const DataLayout &DL,
2705 const TargetRegisterInfo *TRI,
2706 ImmutableCallSite CS) const;
2708 /// Examine constraint type and operand type and determine a weight value.
2709 /// The operand object must already have been set up with the operand type.
2710 virtual ConstraintWeight getMultipleConstraintMatchWeight(
2711 AsmOperandInfo &info, int maIndex) const;
2713 /// Examine constraint string and operand type and determine a weight value.
2714 /// The operand object must already have been set up with the operand type.
2715 virtual ConstraintWeight getSingleConstraintMatchWeight(
2716 AsmOperandInfo &info, const char *constraint) const;
2718 /// Determines the constraint code and constraint type to use for the specific
2719 /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
2720 /// If the actual operand being passed in is available, it can be passed in as
2721 /// Op, otherwise an empty SDValue can be passed.
2722 virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
2723 SDValue Op,
2724 SelectionDAG *DAG = nullptr) const;
2726 /// Given a constraint, return the type of constraint it is for this target.
2727 virtual ConstraintType getConstraintType(StringRef Constraint) const;
2729 /// Given a physical register constraint (e.g. {edx}), return the register
2730 /// number and the register class for the register.
2732 /// Given a register class constraint, like 'r', if this corresponds directly
2733 /// to an LLVM register class, return a register of 0 and the register class
2734 /// corresponding to it.
2736 /// This should only be used for C_Register constraints. On error, this
2737 /// returns a register number of 0 and a null register class pointer.
2738 virtual std::pair<unsigned, const TargetRegisterClass *>
2739 getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
2740 StringRef Constraint, MVT VT) const;
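// For illustration, on x86 the physical-register constraint "{eax}" maps to
// the pair (X86::EAX, &X86::GR32RegClass), while the class constraint "r"
// with an i32 operand yields (0, &X86::GR32RegClass).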
2742 virtual unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const {
2743 if (ConstraintCode == "i")
2744 return InlineAsm::Constraint_i;
2745 else if (ConstraintCode == "m")
2746 return InlineAsm::Constraint_m;
2747 return InlineAsm::Constraint_Unknown;
2750 /// Try to replace an X constraint, which matches anything, with another that
2751 /// has more specific requirements based on the type of the corresponding
2752 /// operand. This returns null if there is no replacement to make.
2753 virtual const char *LowerXConstraint(EVT ConstraintVT) const;
2755 /// Lower the specified operand into the Ops vector. If it is invalid, don't
2756 /// add anything to Ops.
2757 virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
2758 std::vector<SDValue> &Ops,
2759 SelectionDAG &DAG) const;
2761 //===--------------------------------------------------------------------===//
2762 // Div utility functions
2764 SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
2765 bool IsAfterLegalization,
2766 std::vector<SDNode *> *Created) const;
2767 SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
2768 bool IsAfterLegalization,
2769 std::vector<SDNode *> *Created) const;
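// These implement the classic magic-number technique (Hacker's Delight,
// ch. 10). For example, on a 32-bit target "x sdiv 3" can be rewritten
// roughly as:
//   t = mulhs x, 0x55555556   ; high half of the 64-bit product
//   q = t + (t srl 31)        ; add 1 when the quotient is negative
// avoiding an expensive hardware divide.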
2771 /// Targets may override this function to provide custom SDIV lowering for
2772 /// power-of-2 denominators. If the target returns an empty SDValue, LLVM
2773 /// assumes SDIV is expensive and replaces it with a series of other integer
2774 /// operations.
2775 virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
2776 SelectionDAG &DAG,
2777 std::vector<SDNode *> *Created) const;
2779 /// Indicate whether this target prefers to combine FDIVs with the same
2780 /// divisor. If the transform should never be done, return zero. If the
2781 /// transform should be done, return the minimum number of divisor uses
2782 /// that must exist.
2783 virtual unsigned combineRepeatedFPDivisors() const {
2784 return 0;
2785 }
2787 /// Hooks for building estimates in place of slower divisions and square
2788 /// roots.
2790 /// Return a reciprocal square root estimate value for the input operand.
2791 /// The RefinementSteps output is the number of Newton-Raphson refinement
2792 /// iterations required to generate a sufficient (though not necessarily
2793 /// IEEE-754 compliant) estimate for the value type.
2794 /// The boolean UseOneConstNR output is used to select a Newton-Raphson
2795 /// algorithm implementation that uses one constant or two constants.
2796 /// A target may choose to implement its own refinement within this function.
2797 /// If that's true, then return '0' as the number of RefinementSteps to avoid
2798 /// any further refinement of the estimate.
2799 /// An empty SDValue return means no estimate sequence can be created.
2800 virtual SDValue getRsqrtEstimate(SDValue Operand, DAGCombinerInfo &DCI,
2801 unsigned &RefinementSteps,
2802 bool &UseOneConstNR) const {
2803 return SDValue();
2804 }
2806 /// Return a reciprocal estimate value for the input operand.
2807 /// The RefinementSteps output is the number of Newton-Raphson refinement
2808 /// iterations required to generate a sufficient (though not necessarily
2809 /// IEEE-754 compliant) estimate for the value type.
2810 /// A target may choose to implement its own refinement within this function.
2811 /// If that's true, then return '0' as the number of RefinementSteps to avoid
2812 /// any further refinement of the estimate.
2813 /// An empty SDValue return means no estimate sequence can be created.
2814 virtual SDValue getRecipEstimate(SDValue Operand, DAGCombinerInfo &DCI,
2815 unsigned &RefinementSteps) const {
2816 return SDValue();
2817 }
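// Both estimate hooks are based on Newton-Raphson refinement: starting from
// a coarse hardware estimate x0, each iteration improves the result as
//   rsqrt: x_{n+1} = x_n * (1.5 - 0.5 * a * x_n * x_n)
//   recip: x_{n+1} = x_n * (2.0 - a * x_n)
// RefinementSteps is the number of such iterations the caller should emit.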
2819 //===--------------------------------------------------------------------===//
2820 // Legalization utility functions
2823 /// Expand a MUL into two nodes. One that computes the high bits of
2824 /// the result and one that computes the low bits.
2825 /// \param HiLoVT The value type to use for the Lo and Hi nodes.
2826 /// \param LL Low bits of the LHS of the MUL. You can use this parameter
2827 /// if you want to control how low bits are extracted from the LHS.
2828 /// \param LH High bits of the LHS of the MUL. See LL for meaning.
2829 /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
2830 /// \param RH High bits of the RHS of the MUL. See LL for meaning.
2831 /// \returns true if the node has been expanded, false if it has not.
2832 bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
2833 SelectionDAG &DAG, SDValue LL = SDValue(),
2834 SDValue LH = SDValue(), SDValue RL = SDValue(),
2835 SDValue RH = SDValue()) const;
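// The expansion follows the schoolbook decomposition: with x = xh*2^N + xl
// and y = yh*2^N + yl, where N is HiLoVT's bit width,
//   x*y = xl*yl + 2^N*(xl*yh + xh*yl) + 2^(2N)*(xh*yh)
// so an illegal i64 multiply can be assembled from legal i32 multiplies,
// shifts and adds; Lo receives the low N bits of the product and Hi the
// high N bits.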
2837 /// Expand float(f32) to SINT(i64) conversion
2838 /// \param N Node to expand
2839 /// \param Result output after conversion
2840 /// \returns True, if the expansion was successful, false otherwise
2841 bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
2843 //===--------------------------------------------------------------------===//
2844 // Instruction Emitting Hooks
2847 /// This method should be implemented by targets that mark instructions with
2848 /// the 'usesCustomInserter' flag. These instructions are special in various
2849 /// ways, which require special support to insert. The specified MachineInstr
2850 /// is created but not inserted into any basic blocks, and this method is
2851 /// called to expand it into a sequence of instructions, potentially also
2852 /// creating new basic blocks and control flow.
2853 /// As long as the returned basic block is different (i.e., we created a new
2854 /// one), the custom inserter is free to modify the rest of \p MBB.
2855 virtual MachineBasicBlock *
2856 EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
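// For illustration, a SELECT pseudo on a target without a conditional move is
// typically expanded here into a diamond: a conditional branch, two blocks
// providing the alternative values, and a join block whose PHI merges them;
// the pseudo is then erased.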
2858 /// This method should be implemented by targets that mark instructions with
2859 /// the 'hasPostISelHook' flag. These instructions must be adjusted after
2860 /// instruction selection by target hooks. e.g. To fill in optional defs for
2861 /// ARM 's' setting instructions.
2862 virtual void
2863 AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
2865 /// If this function returns true, SelectionDAGBuilder emits a
2866 /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
2867 virtual bool useLoadStackGuardNode() const {
2868 return false;
2869 }
2871 /// Lower TLS global address SDNode for target independent emulated TLS model.
2872 virtual SDValue LowerToTLSEmulatedModel(const GlobalAddressSDNode *GA,
2873 SelectionDAG &DAG) const;
2876 /// Given an LLVM IR type and return type attributes, compute the return value
2877 /// EVTs and flags, and optionally also the offsets, if the return value is
2878 /// being lowered to memory.
2879 void GetReturnInfo(Type *ReturnType, AttributeSet attr,
2880 SmallVectorImpl<ISD::OutputArg> &Outs,
2881 const TargetLowering &TLI, const DataLayout &DL);
2883 } // end llvm namespace
2885 #endif