//===-- llvm/Target/TargetLowering.h - Target Lowering Info -----*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file describes how to lower LLVM code to machine code. This has three
/// main components:
///
///  1. Which ValueTypes are natively supported by the target.
///  2. Which operations are supported for supported ValueTypes.
///  3. Cost thresholds for alternative implementations of certain operations.
///
/// In addition it has a few other components, like information about FP
/// immediates.
///
//===----------------------------------------------------------------------===//

#ifndef LLVM_TARGET_TARGETLOWERING_H
#define LLVM_TARGET_TARGETLOWERING_H

#include "llvm/ADT/DenseMap.h"
#include "llvm/CodeGen/DAGCombine.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {
  class CallInst;
  class FunctionLoweringInfo;
  class ImmutableCallSite;
  class IntrinsicInst;
  class MachineBasicBlock;
  class MachineFunction;
  class MachineJumpTableInfo;
  template<typename T> class SmallVectorImpl;
  class DataLayout;
  class TargetRegisterClass;
  class TargetLibraryInfo;
  class TargetLoweringObjectFile;
  class Value;

  namespace Sched {
    enum Preference {
      None,             // No preference
      Source,           // Follow source order.
      RegPressure,      // Scheduling for lowest register pressure.
      Hybrid,           // Scheduling for both latency and register pressure.
      ILP,              // Scheduling for ILP in low register pressure mode.
      VLIW              // Scheduling for VLIW targets.
    };
  }

/// This base class for TargetLowering contains the SelectionDAG-independent
/// parts that can be used from the rest of CodeGen.
class TargetLoweringBase {
  TargetLoweringBase(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetLoweringBase&) LLVM_DELETED_FUNCTION;

public:
  /// This enum indicates whether operations are valid for a target, and if not,
  /// what action should be used to make them valid.
  enum LegalizeAction {
    Legal,      // The target natively supports this operation.
    Promote,    // This operation should be executed in a larger type.
    Expand,     // Try to expand this to other ops, otherwise use a libcall.
    Custom      // Use the LowerOperation hook to implement custom lowering.
  };

  /// This enum indicates whether a type is legal for a target, and if not,
  /// what action should be used to make it legal.
  enum LegalizeTypeAction {
    TypeLegal,           // The target natively supports this type.
    TypePromoteInteger,  // Replace this integer with a larger one.
    TypeExpandInteger,   // Split this integer into two of half the size.
    TypeSoftenFloat,     // Convert this float to a same size integer type.
    TypeExpandFloat,     // Split this float into two of half the size.
    TypeScalarizeVector, // Replace this one-element vector with its element.
    TypeSplitVector,     // Split this vector into two of half the size.
    TypeWidenVector      // This vector should be widened into a larger vector.
  };

  /// LegalizeKind holds the legalization kind that needs to happen to an EVT
  /// in order to type-legalize it.
  typedef std::pair<LegalizeTypeAction, EVT> LegalizeKind;

  /// Enum that describes how the target represents true/false values.
  enum BooleanContent {
    UndefinedBooleanContent,        // Only bit 0 counts; the rest can hold garbage.
    ZeroOrOneBooleanContent,        // All bits zero except for bit 0.
    ZeroOrNegativeOneBooleanContent // All bits equal to bit 0.
  };

  /// Enum that describes what type of support for selects the target has.
  enum SelectSupportKind {
    ScalarValSelect,     // The target supports scalar selects (ex: cmov).
    ScalarCondVectorVal, // The target supports selects with a scalar condition
                         // and vector values (ex: cmov).
    VectorMaskSelect     // The target supports vector selects with a vector
                         // mask (ex: x86 blends).
  };

  static ISD::NodeType getExtendForContent(BooleanContent Content) {
    switch (Content) {
    case UndefinedBooleanContent:
      // Extend by adding rubbish bits.
      return ISD::ANY_EXTEND;
    case ZeroOrOneBooleanContent:
      // Extend by adding zero bits.
      return ISD::ZERO_EXTEND;
    case ZeroOrNegativeOneBooleanContent:
      // Extend by copying the sign bit.
      return ISD::SIGN_EXTEND;
    }
    llvm_unreachable("Invalid content kind");
  }

  /// NOTE: The constructor takes ownership of TLOF.
  explicit TargetLoweringBase(const TargetMachine &TM,
                              const TargetLoweringObjectFile *TLOF);
  virtual ~TargetLoweringBase();

protected:
  /// \brief Initialize all of the actions to default values.
  void initActions();

public:
  const TargetMachine &getTargetMachine() const { return TM; }
  const DataLayout *getDataLayout() const { return TD; }
  const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }

  bool isBigEndian() const { return !IsLittleEndian; }
  bool isLittleEndian() const { return IsLittleEndian; }

  // Return the pointer type for the given address space, defaults to
  // the pointer type from the data layout.
  // FIXME: The default needs to be removed once all the code is updated.
  virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const;
  unsigned getPointerSizeInBits(uint32_t AS = 0) const;
  unsigned getPointerTypeSizeInBits(Type *Ty) const;
  virtual MVT getScalarShiftAmountTy(EVT LHSTy) const;

  EVT getShiftAmountTy(EVT LHSTy) const;

  /// Returns the type to be used for the index operand of:
  /// ISD::INSERT_VECTOR_ELT, ISD::EXTRACT_VECTOR_ELT,
  /// ISD::INSERT_SUBVECTOR, and ISD::EXTRACT_SUBVECTOR.
  virtual MVT getVectorIdxTy() const {
    return getPointerTy();
  }

  /// Return true if the select operation is expensive for this target.
  bool isSelectExpensive() const { return SelectIsExpensive; }

  virtual bool isSelectSupported(SelectSupportKind /*kind*/) const {
    return true;
  }

  /// Return true if a vector of the given type should be split
  /// (TypeSplitVector) instead of promoted (TypePromoteInteger) during type
  /// legalization.
  virtual bool shouldSplitVectorElementType(EVT /*VT*/) const { return false; }

  /// Return true if integer divide is usually cheaper than a sequence of
  /// several shifts, adds, and multiplies for this target.
  bool isIntDivCheap() const { return IntDivIsCheap; }

  /// Returns true if the target has indicated that at least one type should be
  /// bypassed.
  bool isSlowDivBypassed() const { return !BypassSlowDivWidths.empty(); }

  /// Returns the map of slow types for division or remainder with the
  /// corresponding fast types.
  const DenseMap<unsigned int, unsigned int> &getBypassSlowDivWidths() const {
    return BypassSlowDivWidths;
  }

  /// Return true if pow2 div is cheaper than a chain of srl/add/sra.
  bool isPow2DivCheap() const { return Pow2DivIsCheap; }

  /// Return true if flow control is an expensive operation that should be
  /// avoided.
  bool isJumpExpensive() const { return JumpIsExpensive; }

  /// Return true if selects are only cheaper than branches if the branch is
  /// unlikely to be predicted right.
  bool isPredictableSelectExpensive() const {
    return PredictableSelectIsExpensive;
  }

  /// Return the ValueType of the result of SETCC operations. Also used to
  /// obtain the target's preferred type for the condition operand of SELECT and
  /// BRCOND nodes. In the case of BRCOND the argument passed is MVT::Other
  /// since there are no other operands to get a type hint from.
  virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;

  /// Return the ValueType for comparison libcalls. Comparison libcalls include
  /// floating point comparison calls, and Ordered/Unordered check calls on
  /// floating point numbers.
  MVT::SimpleValueType getCmpLibcallReturnType() const;

  /// For targets without i1 registers, this gives the nature of the high-bits
  /// of boolean values held in types wider than i1.
  ///
  /// "Boolean values" are special true/false values produced by nodes like
  /// SETCC and consumed (as the condition) by nodes like SELECT and BRCOND.
  /// Not to be confused with general values promoted from i1. Some cpus
  /// distinguish between vectors of boolean and scalars; the isVec parameter
  /// selects between the two kinds. For example on X86 a scalar boolean should
  /// be zero extended from i1, while the elements of a vector of booleans
  /// should be sign extended from i1.
  BooleanContent getBooleanContents(bool isVec) const {
    return isVec ? BooleanVectorContents : BooleanContents;
  }
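
  // For illustration only: combined with getExtendForContent above, a client
  // can ask how to widen a boolean to a full-width value. On a hypothetical
  // target using ZeroOrNegativeOneBooleanContent for vectors, this yields
  // ISD::SIGN_EXTEND (a sketch, not tied to any in-tree backend):
  //
  //   ISD::NodeType ExtOp =
  //       getExtendForContent(getBooleanContents(/*isVec=*/true));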

  /// Return the target scheduling preference.
  Sched::Preference getSchedulingPreference() const {
    return SchedPreferenceInfo;
  }

  /// Some schedulers, e.g. hybrid, can switch to different scheduling
  /// heuristics for different nodes. This function returns the preference
  /// (or none) for the given node.
  virtual Sched::Preference getSchedulingPreference(SDNode *) const {
    return Sched::None;
  }

  /// Return the register class that should be used for the specified value
  /// type.
  virtual const TargetRegisterClass *getRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RegClassForVT[VT.SimpleTy];
    assert(RC && "This value type is not natively supported!");
    return RC;
  }

  /// Return the 'representative' register class for the specified value
  /// type.
  ///
  /// The 'representative' register class is the largest legal super-reg
  /// register class for the register class of the value type. For example, on
  /// i386 the rep register class for i8, i16, and i32 is GR32; on x86_64 the
  /// rep register class is GR64.
  virtual const TargetRegisterClass *getRepRegClassFor(MVT VT) const {
    const TargetRegisterClass *RC = RepRegClassForVT[VT.SimpleTy];
    return RC;
  }

  /// Return the cost of the 'representative' register class for the specified
  /// value type.
  virtual uint8_t getRepRegClassCostFor(MVT VT) const {
    return RepRegClassCostForVT[VT.SimpleTy];
  }

  /// Return true if the target has native support for the specified value
  /// type. This means that it has a register that directly holds it without
  /// promotions or expansions.
  bool isTypeLegal(EVT VT) const {
    assert(!VT.isSimple() ||
           (unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
    return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
  }

  class ValueTypeActionImpl {
    /// ValueTypeActions - For each value type, keep a LegalizeTypeAction enum
    /// that indicates how instruction selection should deal with the type.
    uint8_t ValueTypeActions[MVT::LAST_VALUETYPE];

  public:
    ValueTypeActionImpl() {
      std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
    }

    LegalizeTypeAction getTypeAction(MVT VT) const {
      return (LegalizeTypeAction)ValueTypeActions[VT.SimpleTy];
    }

    void setTypeAction(MVT VT, LegalizeTypeAction Action) {
      unsigned I = VT.SimpleTy;
      ValueTypeActions[I] = Action;
    }
  };

  const ValueTypeActionImpl &getValueTypeActions() const {
    return ValueTypeActions;
  }

  /// Return how we should legalize values of this type, either it is already
  /// legal (return 'Legal') or we need to promote it to a larger type (return
  /// 'Promote'), or we need to expand it into multiple registers of smaller
  /// integer type (return 'Expand'). 'Custom' is not an option.
  LegalizeTypeAction getTypeAction(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).first;
  }
  LegalizeTypeAction getTypeAction(MVT VT) const {
    return ValueTypeActions.getTypeAction(VT);
  }

  /// For types supported by the target, this is an identity function. For
  /// types that must be promoted to larger types, this returns the larger type
  /// to promote to. For integer types that are larger than the largest integer
  /// register, this contains one step in the expansion to get to the smaller
  /// register. For illegal floating point types, this returns the integer type
  /// to use.
  EVT getTypeToTransformTo(LLVMContext &Context, EVT VT) const {
    return getTypeConversion(Context, VT).second;
  }
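
  // For example, on a typical 32-bit target (values illustrative, not tied to
  // any particular backend), one step of legalizing i64 looks like:
  //
  //   getTypeAction(Ctx, MVT::i64)        == TypeExpandInteger
  //   getTypeToTransformTo(Ctx, MVT::i64) == MVT::i32
  //   getTypeAction(Ctx, MVT::i32)        == TypeLegal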

  /// For types supported by the target, this is an identity function. For
  /// types that must be expanded (i.e. integer types that are larger than the
  /// largest integer register or illegal floating point types), this returns
  /// the largest legal type it will be expanded to.
  EVT getTypeToExpandTo(LLVMContext &Context, EVT VT) const {
    assert(!VT.isVector());
    while (true) {
      switch (getTypeAction(Context, VT)) {
      case TypeLegal:
        return VT;
      case TypeExpandInteger:
        VT = getTypeToTransformTo(Context, VT);
        break;
      default:
        llvm_unreachable("Type is not legal nor is it to be expanded!");
      }
    }
  }

  /// Vector types are broken down into some number of legal first class types.
  /// For example, EVT::v8f32 maps to 2 EVT::v4f32 with Altivec or SSE1, or 8
  /// promoted EVT::f64 values with the X86 FP stack. Similarly, EVT::v2i64
  /// turns into 4 EVT::i32 values with both PPC and X86.
  ///
  /// This method returns the number of registers needed, and the VT for each
  /// register. It also returns the VT and quantity of the intermediate values
  /// before they are promoted/expanded.
  unsigned getVectorTypeBreakdown(LLVMContext &Context, EVT VT,
                                  EVT &IntermediateVT,
                                  unsigned &NumIntermediates,
                                  MVT &RegisterVT) const;
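
  // Sketch of a call for v8f32 on an SSE1-like target (results illustrative):
  //
  //   EVT IntermediateVT;
  //   MVT RegisterVT;
  //   unsigned NumIntermediates;
  //   unsigned NumRegs = getVectorTypeBreakdown(
  //       Ctx, MVT::v8f32, IntermediateVT, NumIntermediates, RegisterVT);
  //   // NumRegs == 2, IntermediateVT == v4f32, RegisterVT == v4f32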

  struct IntrinsicInfo {
    unsigned     opc;      // target opcode
    EVT          memVT;    // memory VT
    const Value* ptrVal;   // value representing memory location
    int          offset;   // offset off of ptrVal
    unsigned     align;    // alignment
    bool         vol;      // is volatile?
    bool         readMem;  // reads memory?
    bool         writeMem; // writes memory?
  };

  /// Given an intrinsic, checks if on the target the intrinsic will need to map
  /// to a MemIntrinsicNode (touches memory). If this is the case, it returns
  /// true and stores the intrinsic information into the IntrinsicInfo that was
  /// passed to the function.
  virtual bool getTgtMemIntrinsic(IntrinsicInfo &, const CallInst &,
                                  unsigned /*Intrinsic*/) const {
    return false;
  }

  /// Returns true if the target can instruction select the specified FP
  /// immediate natively. If false, the legalizer will materialize the FP
  /// immediate as a load from a constant pool.
  virtual bool isFPImmLegal(const APFloat &/*Imm*/, EVT /*VT*/) const {
    return false;
  }

  /// Targets can use this to indicate that they only support *some*
  /// VECTOR_SHUFFLE operations, those with specific masks. By default, if a
  /// target supports the VECTOR_SHUFFLE node, all mask values are assumed to be
  /// legal.
  virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                  EVT /*VT*/) const {
    return true;
  }

  /// Returns true if the operation can trap for the value type.
  ///
  /// VT must be a legal type. By default, we optimistically assume most
  /// operations don't trap except for divide and remainder.
  virtual bool canOpTrap(unsigned Op, EVT VT) const;

  /// Similar to isShuffleMaskLegal. Targets can use this to indicate if there
  /// is a suitable VECTOR_SHUFFLE that can be used to replace a VAND with a
  /// constant pool entry.
  virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &/*Mask*/,
                                      EVT /*VT*/) const {
    return false;
  }

  /// Return how this operation should be treated: either it is legal, needs to
  /// be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction getOperationAction(unsigned Op, EVT VT) const {
    if (VT.isExtended()) return Expand;
    // If a target-specific SDNode requires legalization, require the target
    // to provide custom legalization for it.
    if (Op >= array_lengthof(OpActions[0])) return Custom;
    unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
    return (LegalizeAction)OpActions[I][Op];
  }

  /// Return true if the specified operation is legal on this target or can be
  /// made legal with custom lowering. This is used to help guide high-level
  /// lowering decisions.
  bool isOperationLegalOrCustom(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Custom);
  }
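
  // Typical use in target-independent lowering (a sketch): only form an FMA
  // node when the target can select or custom-lower it for this type.
  //
  //   if (TLI.isOperationLegalOrCustom(ISD::FMA, MVT::f64))
  //     ; // build ISD::FMA instead of separate fmul + fadd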

  /// Return true if the specified operation is legal on this target or can be
  /// made legal using promotion. This is used to help guide high-level lowering
  /// decisions.
  bool isOperationLegalOrPromote(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
      (getOperationAction(Op, VT) == Legal ||
       getOperationAction(Op, VT) == Promote);
  }

  /// Return true if the specified operation is illegal on this target or
  /// unlikely to be made legal with custom lowering. This is used to help guide
  /// high-level lowering decisions.
  bool isOperationExpand(unsigned Op, EVT VT) const {
    return (!isTypeLegal(VT) || getOperationAction(Op, VT) == Expand);
  }

  /// Return true if the specified operation is legal on this target.
  bool isOperationLegal(unsigned Op, EVT VT) const {
    return (VT == MVT::Other || isTypeLegal(VT)) &&
           getOperationAction(Op, VT) == Legal;
  }

  /// Return how this load with extension should be treated: either it is legal,
  /// needs to be promoted to a larger size, needs to be expanded to some other
  /// code sequence, or the target has a custom expander for it.
  LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType];
  }

  /// Return true if the specified load with extension is legal on this target.
  bool isLoadExtLegal(unsigned ExtType, EVT VT) const {
    return VT.isSimple() &&
      getLoadExtAction(ExtType, VT.getSimpleVT()) == Legal;
  }

  /// Return how this store with truncation should be treated: either it is
  /// legal, needs to be promoted to a larger size, needs to be expanded to some
  /// other code sequence, or the target has a custom expander for it.
  LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy]
                                            [MemVT.SimpleTy];
  }

  /// Return true if the specified store with truncation is legal on this
  /// target.
  bool isTruncStoreLegal(EVT ValVT, EVT MemVT) const {
    return isTypeLegal(ValVT) && MemVT.isSimple() &&
      getTruncStoreAction(ValVT.getSimpleVT(), MemVT.getSimpleVT()) == Legal;
  }

  /// Return how the indexed load should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedLoadAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)((IndexedModeActions[Ty][IdxMode] & 0xf0) >> 4);
  }

  /// Return true if the specified indexed load is legal on this target.
  bool isIndexedLoadLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedLoadAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the indexed store should be treated: either it is legal, needs
  /// to be promoted to a larger size, needs to be expanded to some other code
  /// sequence, or the target has a custom expander for it.
  LegalizeAction
  getIndexedStoreAction(unsigned IdxMode, MVT VT) const {
    assert(IdxMode < ISD::LAST_INDEXED_MODE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    unsigned Ty = (unsigned)VT.SimpleTy;
    return (LegalizeAction)(IndexedModeActions[Ty][IdxMode] & 0x0f);
  }

  /// Return true if the specified indexed store is legal on this target.
  bool isIndexedStoreLegal(unsigned IdxMode, EVT VT) const {
    return VT.isSimple() &&
      (getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Legal ||
       getIndexedStoreAction(IdxMode, VT.getSimpleVT()) == Custom);
  }

  /// Return how the condition code should be treated: either it is legal, needs
  /// to be expanded to some other code sequence, or the target has a custom
  /// expander for it.
  LegalizeAction
  getCondCodeAction(ISD::CondCode CC, MVT VT) const {
    assert((unsigned)CC < array_lengthof(CondCodeActions) &&
           (unsigned)VT.SimpleTy < sizeof(CondCodeActions[0])*4 &&
           "Table isn't big enough!");
    // The lower 5 bits of SimpleTy select a 2-bit field within a 64-bit
    // element, and the remaining upper bits index into the second dimension
    // of the array to select which 64-bit element to use.
    LegalizeAction Action = (LegalizeAction)
      ((CondCodeActions[CC][VT.SimpleTy >> 5] >> (2*(VT.SimpleTy & 0x1F))) & 3);
    assert(Action != Promote && "Can't promote condition code!");
    return Action;
  }

  /// Return true if the specified condition code is legal on this target.
  bool isCondCodeLegal(ISD::CondCode CC, MVT VT) const {
    return getCondCodeAction(CC, VT) == Legal ||
           getCondCodeAction(CC, VT) == Custom;
  }

  /// If the action for this operation is to promote, this method returns the
  /// ValueType to promote to.
  MVT getTypeToPromoteTo(unsigned Op, MVT VT) const {
    assert(getOperationAction(Op, VT) == Promote &&
           "This operation isn't promoted!");

    // See if this has an explicit type specified.
    std::map<std::pair<unsigned, MVT::SimpleValueType>,
             MVT::SimpleValueType>::const_iterator PTTI =
      PromoteToType.find(std::make_pair(Op, VT.SimpleTy));
    if (PTTI != PromoteToType.end()) return PTTI->second;

    assert((VT.isInteger() || VT.isFloatingPoint()) &&
           "Cannot autopromote this type, add it with AddPromotedToType.");

    MVT NVT = VT;
    do {
      NVT = (MVT::SimpleValueType)(NVT.SimpleTy+1);
      assert(NVT.isInteger() == VT.isInteger() && NVT != MVT::isVoid &&
             "Didn't find type to promote to!");
    } while (!isTypeLegal(NVT) ||
             getOperationAction(Op, NVT) == Promote);
    return NVT;
  }

  /// Return the EVT corresponding to this LLVM type. This is fixed by the LLVM
  /// operations except for the pointer size. If AllowUnknown is true, this
  /// will return MVT::Other for types with no EVT counterpart (e.g. structs),
  /// otherwise it will assert.
  EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
    // Lower scalar pointers to native pointer types.
    if (PointerType *PTy = dyn_cast<PointerType>(Ty))
      return getPointerTy(PTy->getAddressSpace());

    if (Ty->isVectorTy()) {
      VectorType *VTy = cast<VectorType>(Ty);
      Type *Elm = VTy->getElementType();
      // Lower vectors of pointers to native pointer types.
      if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
        EVT PointerTy(getPointerTy(PT->getAddressSpace()));
        Elm = PointerTy.getTypeForEVT(Ty->getContext());
      }

      return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
                              VTy->getNumElements());
    }
    return EVT::getEVT(Ty, AllowUnknown);
  }

  /// Return the MVT corresponding to this LLVM type. See getValueType.
  MVT getSimpleValueType(Type *Ty, bool AllowUnknown = false) const {
    return getValueType(Ty, AllowUnknown).getSimpleVT();
  }

  /// Return the desired alignment for ByVal aggregate function arguments in the
  /// caller parameter area. This is the actual alignment, not its logarithm.
  virtual unsigned getByValTypeAlignment(Type *Ty) const;

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(MVT VT) const {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegisterTypeForVT));
    return RegisterTypeForVT[VT.SimpleTy];
  }

  /// Return the type of registers that this ValueType will eventually require.
  MVT getRegisterType(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(RegisterTypeForVT));
      return RegisterTypeForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT RegisterVT;
      unsigned NumIntermediates;
      (void)getVectorTypeBreakdown(Context, VT, VT1,
                                   NumIntermediates, RegisterVT);
      return RegisterVT;
    }
    if (VT.isInteger()) {
      return getRegisterType(Context, getTypeToTransformTo(Context, VT));
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// Return the number of registers that this ValueType will eventually
  /// require.
  ///
  /// This is one for any types promoted to live in larger registers, but may be
  /// more than one for types (like i64) that are split into pieces. For types
  /// like i140, which are first promoted then expanded, it is the number of
  /// registers needed to hold all the bits of the original type. For an i140
  /// on a 32 bit machine this means 5 registers.
  unsigned getNumRegisters(LLVMContext &Context, EVT VT) const {
    if (VT.isSimple()) {
      assert((unsigned)VT.getSimpleVT().SimpleTy <
                array_lengthof(NumRegistersForVT));
      return NumRegistersForVT[VT.getSimpleVT().SimpleTy];
    }
    if (VT.isVector()) {
      EVT VT1;
      MVT VT2;
      unsigned NumIntermediates;
      return getVectorTypeBreakdown(Context, VT, VT1, NumIntermediates, VT2);
    }
    if (VT.isInteger()) {
      unsigned BitWidth = VT.getSizeInBits();
      unsigned RegWidth = getRegisterType(Context, VT).getSizeInBits();
      return (BitWidth + RegWidth - 1) / RegWidth;
    }
    llvm_unreachable("Unsupported extended type!");
  }

  /// If true, then instruction selection should seek to shrink the FP constant
  /// of the specified type to a smaller type in order to save space and / or
  /// reduce runtime.
  virtual bool ShouldShrinkFPConstant(EVT) const { return true; }

  /// If true, the target has custom DAG combine transformations that it can
  /// perform for the specified node.
  bool hasTargetDAGCombine(ISD::NodeType NT) const {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    return TargetDAGCombineArray[NT >> 3] & (1 << (NT&7));
  }

  /// \brief Get maximum # of store operations permitted for llvm.memset
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memset. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have the OptSize attribute.
  unsigned getMaxStoresPerMemset(bool OptSize) const {
    return OptSize ? MaxStoresPerMemsetOptSize : MaxStoresPerMemset;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memcpy
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memcpy. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have the OptSize attribute.
  unsigned getMaxStoresPerMemcpy(bool OptSize) const {
    return OptSize ? MaxStoresPerMemcpyOptSize : MaxStoresPerMemcpy;
  }

  /// \brief Get maximum # of store operations permitted for llvm.memmove
  ///
  /// This function returns the maximum number of store operations permitted
  /// to replace a call to llvm.memmove. The value is set by the target at the
  /// performance threshold for such a replacement. If OptSize is true,
  /// return the limit for functions that have the OptSize attribute.
  unsigned getMaxStoresPerMemmove(bool OptSize) const {
    return OptSize ? MaxStoresPerMemmoveOptSize : MaxStoresPerMemmove;
  }

  /// \brief Determine if the target supports unaligned memory accesses.
  ///
  /// This function returns true if the target allows unaligned memory accesses
  /// of the specified type. If true, it also returns whether the unaligned
  /// memory access is "fast" in the second argument by reference. This is used,
  /// for example, in situations where an array copy/move/set is converted to a
  /// sequence of store operations. Its use helps to ensure that such
  /// replacements don't generate code that causes an alignment error (trap) on
  /// the target machine.
  virtual bool allowsUnalignedMemoryAccesses(EVT, bool * /*Fast*/ = 0) const {
    return false;
  }
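
  // A target with fast unaligned 32-bit accesses might override this roughly
  // as follows (an illustrative sketch only):
  //
  //   bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const {
  //     if (VT == MVT::i32) {
  //       if (Fast) *Fast = true;
  //       return true;
  //     }
  //     return false;
  //   }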

  /// Returns the target specific optimal type for load and store operations as
  /// a result of memset, memcpy, and memmove lowering.
  ///
  /// If DstAlign is zero, the destination alignment can satisfy any
  /// constraint. Similarly, if SrcAlign is zero it means there isn't a need to
  /// check it against the alignment requirement, probably because the source
  /// does not need to be loaded. If 'IsMemset' is true, that means it's
  /// expanding a memset. If 'ZeroMemset' is true, that means it's a memset of
  /// zero. 'MemcpyStrSrc' indicates whether the memcpy source is constant so it
  /// does not need to be loaded. It returns EVT::Other if the type should be
  /// determined using generic target-independent logic.
  virtual EVT getOptimalMemOpType(uint64_t /*Size*/,
                                  unsigned /*DstAlign*/, unsigned /*SrcAlign*/,
                                  bool /*IsMemset*/,
                                  bool /*ZeroMemset*/,
                                  bool /*MemcpyStrSrc*/,
                                  MachineFunction &/*MF*/) const {
    return MVT::Other;
  }

  /// Returns true if it's safe to use load / store of the specified type to
  /// expand memcpy / memset inline.
  ///
  /// This is mostly true for all types except for some special cases. For
  /// example, on X86 targets without SSE2 f64 load / store are done with fldl /
  /// fstpl which also does type conversion. Note the specified type doesn't
  /// have to be legal as the hook is used before type legalization.
  virtual bool isSafeMemOpType(MVT /*VT*/) const { return true; }

  /// Determine if we should use _setjmp or setjmp to implement llvm.setjmp.
  bool usesUnderscoreSetJmp() const {
    return UseUnderscoreSetJmp;
  }

  /// Determine if we should use _longjmp or longjmp to implement llvm.longjmp.
  bool usesUnderscoreLongJmp() const {
    return UseUnderscoreLongJmp;
  }

  /// Return whether the target can generate code for jump tables.
  bool supportJumpTables() const {
    return SupportJumpTables;
  }

  /// Return the integer threshold on the number of blocks at which to use jump
  /// tables rather than an if sequence.
  int getMinimumJumpTableEntries() const {
    return MinimumJumpTableEntries;
  }

  /// If a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  unsigned getStackPointerRegisterToSaveRestore() const {
    return StackPointerRegisterToSaveRestore;
  }

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to a landing pad.
  unsigned getExceptionPointerRegister() const {
    return ExceptionPointerRegister;
  }

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  unsigned getExceptionSelectorRegister() const {
    return ExceptionSelectorRegister;
  }

  /// Returns the target's jmp_buf size in bytes (if never set, the default is
  /// 200).
  unsigned getJumpBufSize() const {
    return JumpBufSize;
  }

  /// Returns the target's jmp_buf alignment in bytes (if never set, the default
  /// is 0).
  unsigned getJumpBufAlignment() const {
    return JumpBufAlignment;
  }

  /// Return the minimum stack alignment of an argument.
  unsigned getMinStackArgumentAlignment() const {
    return MinStackArgumentAlignment;
  }

  /// Return the minimum function alignment.
  unsigned getMinFunctionAlignment() const {
    return MinFunctionAlignment;
  }

  /// Return the preferred function alignment.
  unsigned getPrefFunctionAlignment() const {
    return PrefFunctionAlignment;
  }

  /// Return the preferred loop alignment.
  unsigned getPrefLoopAlignment() const {
    return PrefLoopAlignment;
  }

  /// Return whether the DAG builder should automatically insert fences and
  /// reduce ordering for atomics.
  bool getInsertFencesForAtomic() const {
    return InsertFencesForAtomic;
  }

  /// Return true if the target stores stack protector cookies at a fixed offset
  /// in some non-standard address space, and populates the address space and
  /// offset as appropriate.
  virtual bool getStackCookieLocation(unsigned &/*AddressSpace*/,
                                      unsigned &/*Offset*/) const {
    return false;
  }

  /// Returns the maximal possible offset which can be used for loads / stores
  /// from the global.
  virtual unsigned getMaximalGlobalOffset() const {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  /// \name Helpers for TargetTransformInfo implementations
  /// @{

  /// Get the ISD node that corresponds to the Instruction class opcode.
  int InstructionOpcodeToISD(unsigned Opcode) const;

  /// Estimate the cost of type-legalization and the legalized type.
  std::pair<unsigned, MVT> getTypeLegalizationCost(Type *Ty) const;

  /// @}

  //===--------------------------------------------------------------------===//
  // TargetLowering Configuration Methods - These methods should be invoked by
  // the derived class constructor to configure this object for the target.
  //

  /// \brief Reset the operation actions based on target options.
  virtual void resetOperationActions() {}

protected:
  /// Specify how the target extends the result of a boolean value from i1 to a
  /// wider type. See getBooleanContents.
  void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }

  /// Specify how the target extends the result of a vector boolean value from a
  /// vector of i1 to a wider type. See getBooleanContents.
  void setBooleanVectorContents(BooleanContent Ty) {
    BooleanVectorContents = Ty;
  }

  /// Specify the target scheduling preference.
  void setSchedulingPreference(Sched::Preference Pref) {
    SchedPreferenceInfo = Pref;
  }

  /// Indicate whether this target prefers to use _setjmp to implement
  /// llvm.setjmp or the non-underscore version. Defaults to false.
  void setUseUnderscoreSetJmp(bool Val) {
    UseUnderscoreSetJmp = Val;
  }

  /// Indicate whether this target prefers to use _longjmp to implement
  /// llvm.longjmp or the non-underscore version. Defaults to false.
  void setUseUnderscoreLongJmp(bool Val) {
    UseUnderscoreLongJmp = Val;
  }

  /// Indicate whether the target can generate code for jump tables.
  void setSupportJumpTables(bool Val) {
    SupportJumpTables = Val;
  }

  /// Indicate the number of blocks at which jump tables should be generated
  /// rather than an if sequence.
  void setMinimumJumpTableEntries(int Val) {
    MinimumJumpTableEntries = Val;
  }

  /// If set to a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  void setStackPointerRegisterToSaveRestore(unsigned R) {
    StackPointerRegisterToSaveRestore = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception address on entry to a landing pad.
  void setExceptionPointerRegister(unsigned R) {
    ExceptionPointerRegister = R;
  }

  /// If set to a physical register, this sets the register that receives the
  /// exception typeid on entry to a landing pad.
  void setExceptionSelectorRegister(unsigned R) {
    ExceptionSelectorRegister = R;
  }

  /// Tells the code generator not to expand operations into sequences that use
  /// the select operations if possible.
  void setSelectIsExpensive(bool isExpensive = true) {
    SelectIsExpensive = isExpensive;
  }

  /// Tells the code generator not to expand a sequence of operations into
  /// separate sequences that increase the amount of flow control.
  void setJumpIsExpensive(bool isExpensive = true) {
    JumpIsExpensive = isExpensive;
  }

  /// Tells the code generator whether integer divide is cheap. When it is not,
  /// integer divides should, if possible, be replaced by an alternate sequence
  /// of instructions not containing an integer divide.
  void setIntDivIsCheap(bool isCheap = true) { IntDivIsCheap = isCheap; }

  /// Tells the code generator which bitwidths to bypass.
  void addBypassSlowDiv(unsigned int SlowBitWidth, unsigned int FastBitWidth) {
    BypassSlowDivWidths[SlowBitWidth] = FastBitWidth;
  }
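
  // Example (sketch): tell the code generator that 32-bit div/rem is slow and
  // may be bypassed with an 8-bit unsigned divide when the operands fit.
  //
  //   addBypassSlowDiv(32, 8);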

  /// Tells the code generator that it shouldn't generate srl/add/sra for a
  /// signed divide by power of two, and let the target handle it.
  void setPow2DivIsCheap(bool isCheap = true) { Pow2DivIsCheap = isCheap; }

  /// Add the specified register class as an available regclass for the
  /// specified value type. This indicates the selector can handle values of
  /// that class natively.
  void addRegisterClass(MVT VT, const TargetRegisterClass *RC) {
    assert((unsigned)VT.SimpleTy < array_lengthof(RegClassForVT));
    AvailableRegClasses.push_back(std::make_pair(VT, RC));
    RegClassForVT[VT.SimpleTy] = RC;
  }
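
  // For instance, a target constructor typically registers its legal types
  // like this (register class names here are illustrative):
  //
  //   addRegisterClass(MVT::i32, &MyTarget::GPR32RegClass);
  //   addRegisterClass(MVT::f64, &MyTarget::FPR64RegClass);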

  /// Remove all register classes.
  void clearRegisterClasses() {
    memset(RegClassForVT, 0,
           MVT::LAST_VALUETYPE * sizeof(TargetRegisterClass*));

    AvailableRegClasses.clear();
  }

  /// \brief Remove all operation actions.
  void clearOperationActions() {
  }

  /// Return the largest legal super-reg register class of the register class
  /// for the specified type and its associated "cost".
  virtual std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(MVT VT) const;

  /// Once all of the register classes are added, this allows us to compute
  /// derived properties we expose.
  void computeRegisterProperties();

  /// Indicate that the specified operation does not work with the specified
  /// type and indicate what to do about it.
  void setOperationAction(unsigned Op, MVT VT,
                          LegalizeAction Action) {
    assert(Op < array_lengthof(OpActions[0]) && "Table isn't big enough!");
    OpActions[(unsigned)VT.SimpleTy][Op] = (uint8_t)Action;
  }
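
  // A target constructor might configure actions like so (illustrative only):
  //
  //   setOperationAction(ISD::SDIV,   MVT::i32, Expand); // expand or libcall
  //   setOperationAction(ISD::SELECT, MVT::f64, Custom); // see LowerOperation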

  /// Indicate that the specified load with extension does not work with the
  /// specified type and indicate what to do about it.
  void setLoadExtAction(unsigned ExtType, MVT VT,
                        LegalizeAction Action) {
    assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    LoadExtActions[VT.SimpleTy][ExtType] = (uint8_t)Action;
  }

  /// Indicate that the specified truncating store does not work with the
  /// specified type and indicate what to do about it.
  void setTruncStoreAction(MVT ValVT, MVT MemVT,
                           LegalizeAction Action) {
    assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
           "Table isn't big enough!");
    TruncStoreActions[ValVT.SimpleTy][MemVT.SimpleTy] = (uint8_t)Action;
  }

  /// Indicate that the specified indexed load does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode loads are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedLoadAction(unsigned IdxMode, MVT VT,
                            LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Load actions are kept in the upper half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0xf0;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action) <<4;
  }

  /// Indicate that the specified indexed store does or does not work with the
  /// specified type and indicate what to do about it.
  ///
  /// NOTE: All indexed mode stores are initialized to Expand in
  /// TargetLowering.cpp
  void setIndexedStoreAction(unsigned IdxMode, MVT VT,
                             LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE && IdxMode < ISD::LAST_INDEXED_MODE &&
           (unsigned)Action < 0xf && "Table isn't big enough!");
    // Store actions are kept in the lower half.
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] &= ~0x0f;
    IndexedModeActions[(unsigned)VT.SimpleTy][IdxMode] |= ((uint8_t)Action);
  }

  /// Indicate that the specified condition code is or isn't supported on the
  /// target and indicate what to do about it.
  void setCondCodeAction(ISD::CondCode CC, MVT VT,
                         LegalizeAction Action) {
    assert(VT < MVT::LAST_VALUETYPE &&
           (unsigned)CC < array_lengthof(CondCodeActions) &&
           "Table isn't big enough!");
    // The lower 5 bits of SimpleTy select a 2-bit field within a 64-bit
    // element, and the remaining upper bits index into the second dimension
    // of the array to select which 64-bit element to use.
    CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
      &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
    CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
      |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
  }
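
  // For example, a target without a native unordered FP compare could mark it
  // for expansion into supported compares (sketch):
  //
  //   setCondCodeAction(ISD::SETUO, MVT::f32, Expand);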

  /// If Opc/OrigVT is specified as being promoted, the promotion code defaults
  /// to trying a larger integer/fp until it can find one that works. If that
  /// default is insufficient, this method can be used by the target to override
  /// the default.
  void AddPromotedToType(unsigned Opc, MVT OrigVT, MVT DestVT) {
    PromoteToType[std::make_pair(Opc, OrigVT.SimpleTy)] = DestVT.SimpleTy;
  }
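
  // For instance, a target that wants i1 AND performed as i32 rather than the
  // next larger integer type could write (illustrative sketch):
  //
  //   setOperationAction(ISD::AND, MVT::i1, Promote);
  //   AddPromotedToType(ISD::AND, MVT::i1, MVT::i32);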

  /// Targets should invoke this method for each target independent node that
  /// they want to provide a custom DAG combiner for by implementing the
  /// PerformDAGCombine virtual method.
  void setTargetDAGCombine(ISD::NodeType NT) {
    assert(unsigned(NT >> 3) < array_lengthof(TargetDAGCombineArray));
    TargetDAGCombineArray[NT >> 3] |= 1 << (NT&7);
  }
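
  // Typical pairing (sketch): register interest in a node kind here, then
  // handle it in the derived class's PerformDAGCombine override.
  //
  //   setTargetDAGCombine(ISD::ADD);
  //   // ...and later: SDValue MyTargetLowering::PerformDAGCombine(...)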

  /// Set the target's required jmp_buf buffer size (in bytes); default is 200.
  void setJumpBufSize(unsigned Size) {
    JumpBufSize = Size;
  }

  /// Set the target's required jmp_buf buffer alignment (in bytes); default is
  /// 0.
  void setJumpBufAlignment(unsigned Align) {
    JumpBufAlignment = Align;
  }

  /// Set the target's minimum function alignment (in log2(bytes)).
  void setMinFunctionAlignment(unsigned Align) {
    MinFunctionAlignment = Align;
  }

  /// Set the target's preferred function alignment. This should be set if
  /// there is a performance benefit to higher-than-minimum alignment (in
  /// log2(bytes)).
  void setPrefFunctionAlignment(unsigned Align) {
    PrefFunctionAlignment = Align;
  }

  /// Set the target's preferred loop alignment. Default alignment is zero,
  /// which means the target does not care about loop alignment. The alignment
  /// is specified in log2(bytes).
  void setPrefLoopAlignment(unsigned Align) {
    PrefLoopAlignment = Align;
  }

  /// Set the minimum stack alignment of an argument (in log2(bytes)).
  void setMinStackArgumentAlignment(unsigned Align) {
    MinStackArgumentAlignment = Align;
  }

  /// Set if the DAG builder should automatically insert fences and reduce the
  /// order of atomic memory operations to Monotonic.
  void setInsertFencesForAtomic(bool fence) {
    InsertFencesForAtomic = fence;
  }

public:
  //===--------------------------------------------------------------------===//
  // Addressing mode description hooks (used by LSR etc).
  //

  /// CodeGenPrepare sinks address calculations into the same BB as Load/Store
  /// instructions reading the address. This allows as much computation as
  /// possible to be done in the address mode for that operand. This hook lets
  /// targets also pass back when this should be done on intrinsics which
  /// load/store.
  virtual bool GetAddrModeArguments(IntrinsicInst * /*I*/,
                                    SmallVectorImpl<Value*> &/*Ops*/,
                                    Type *&/*AccessTy*/) const {
    return false;
  }

  /// This represents an addressing mode of:
  ///    BaseGV + BaseOffs + BaseReg + Scale*ScaleReg
  /// If BaseGV is null, there is no BaseGV.
  /// If BaseOffs is zero, there is no base offset.
  /// If HasBaseReg is false, there is no base register.
  /// If Scale is zero, there is no ScaleReg. Scale of 1 indicates a reg with
  /// no scale.
  struct AddrMode {
    GlobalValue *BaseGV;
    int64_t      BaseOffs;
    bool         HasBaseReg;
    int64_t      Scale;
    AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
  };
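
  // As an example, an x86-style address of the form [GV + 24 + 4*reg] would
  // be described as (sketch):
  //
  //   AddrMode AM;
  //   AM.BaseGV = GV;  AM.BaseOffs = 24;  AM.HasBaseReg = false;  AM.Scale = 4;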

  /// Return true if the addressing mode represented by AM is legal for this
  /// target, for a load/store of the specified type.
  ///
  /// The type may be VoidTy, in which case only return true if the addressing
  /// mode is legal for a load/store of any legal type. TODO: Handle
  /// pre/postinc as well.
  virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const;

  /// \brief Return the cost of the scaling factor used in the addressing mode
  /// represented by AM for this target, for a load/store of the specified type.
  ///
  /// If the AM is supported, the return value must be >= 0.
  /// If the AM is not supported, it returns a negative value.
  /// TODO: Handle pre/postinc as well.
  virtual int getScalingFactorCost(const AddrMode &AM, Type *Ty) const {
    // Default: assume that any scaling factor used in a legal AM is free.
    if (isLegalAddressingMode(AM, Ty)) return 0;
    return -1;
  }

  /// Return true if the specified immediate is a legal icmp immediate, that is
  /// the target has icmp instructions which can compare a register against the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalICmpImmediate(int64_t) const {
    return true;
  }

  /// Return true if the specified immediate is a legal add immediate, that is
  /// the target has add instructions which can add a register with the
  /// immediate without having to materialize the immediate into a register.
  virtual bool isLegalAddImmediate(int64_t) const {
    return true;
  }

  /// Return true if it's free to truncate a value of type Ty1 to type
  /// Ty2. e.g. On x86 it's free to truncate an i32 value in register EAX to i16
  /// by referencing its sub-register AX.
  virtual bool isTruncateFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  /// Return true if a truncation from Ty1 to Ty2 is permitted when deciding
  /// whether a call is in tail position. Typically this means that both results
  /// would be assigned to the same register or stack slot, but it could mean
  /// the target performs adequate checks of its own before proceeding with the
  /// tail call.
  virtual bool allowTruncateForTailCall(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  virtual bool isTruncateFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// Return true if any actual instruction that defines a value of type Ty1
  /// implicitly zero-extends the value to Ty2 in the result register.
  ///
  /// This does not necessarily include registers defined in unknown ways, such
  /// as incoming arguments, or copies from unknown virtual registers. Also, if
  /// isTruncateFree(Ty2, Ty1) is true, this does not necessarily apply to
  /// truncate instructions. e.g. on x86-64, all instructions that define 32-bit
  /// values implicitly zero-extend the result out to 64 bits.
  virtual bool isZExtFree(Type * /*Ty1*/, Type * /*Ty2*/) const {
    return false;
  }

  virtual bool isZExtFree(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  /// Return true if the target supplies and combines to a paired load
  /// two loaded values of type LoadedType next to each other in memory.
  /// RequiredAlignment gives the minimal alignment constraints that must be met
  /// to be able to select this paired load.
  ///
  /// This information is *not* used to generate actual paired loads, but it is
  /// used to generate a sequence of loads that is easier to combine into a
  /// paired load.
  /// For instance, something like this:
  ///   a = load i64* addr
  ///   b = trunc i64 a to i32
  ///   c = lshr i64 a, 32
  ///   d = trunc i64 c to i32
  /// will be optimized into:
  ///   b = load i32* addr1
  ///   d = load i32* addr2
  /// where addr1 = addr2 +/- sizeof(i32).
  ///
  /// In other words, unless the target performs a post-isel load combining,
  /// this information should not be provided because it will generate more
  /// loads.
  virtual bool hasPairedLoad(Type * /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }

  virtual bool hasPairedLoad(EVT /*LoadedType*/,
                             unsigned & /*RequiredAlignment*/) const {
    return false;
  }

  /// Return true if zero-extending the specific node Val to type VT2 is free
  /// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
  /// because it's folded such as X86 zero-extending loads).
  virtual bool isZExtFree(SDValue Val, EVT VT2) const {
    return isZExtFree(Val.getValueType(), VT2);
  }

  /// Return true if an fneg operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFNegFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an fabs operation is free to the point where it is never
  /// worthwhile to replace it with a bitwise operation.
  virtual bool isFAbsFree(EVT VT) const {
    assert(VT.isFloatingPoint());
    return false;
  }

  /// Return true if an FMA operation is faster than a pair of fmul and fadd
  /// instructions. fmuladd intrinsics will be expanded to FMAs when this method
  /// returns true, otherwise fmuladd is expanded to fmul + fadd.
  ///
  /// NOTE: This may be called before legalization on types for which FMAs are
  /// not legal, but should return true if those types will eventually legalize
  /// to types that support FMAs. After legalization, it will only be called on
  /// types that support FMAs (via Legal or Custom actions).
  virtual bool isFMAFasterThanFMulAndFAdd(EVT) const {
    return false;
  }

  /// Return true if it's profitable to narrow operations of type VT1 to
  /// VT2. e.g. on x86, it's profitable to narrow from i32 to i8 but not from
  /// i32 to i16.
  virtual bool isNarrowingProfitable(EVT /*VT1*/, EVT /*VT2*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // Runtime Library hooks
  //

  /// Rename the default libcall routine name for the specified libcall.
  void setLibcallName(RTLIB::Libcall Call, const char *Name) {
    LibcallRoutineNames[Call] = Name;
  }

  /// Get the libcall routine name for the specified libcall.
  const char *getLibcallName(RTLIB::Libcall Call) const {
    return LibcallRoutineNames[Call];
  }

  /// Override the default CondCode to be used to test the result of the
  /// comparison libcall against zero.
  void setCmpLibcallCC(RTLIB::Libcall Call, ISD::CondCode CC) {
    CmpLibcallCCs[Call] = CC;
  }

  /// Get the CondCode that's to be used to test the result of the comparison
  /// libcall against zero.
  ISD::CondCode getCmpLibcallCC(RTLIB::Libcall Call) const {
    return CmpLibcallCCs[Call];
  }

  /// Set the CallingConv that should be used for the specified libcall.
  void setLibcallCallingConv(RTLIB::Libcall Call, CallingConv::ID CC) {
    LibcallCallingConvs[Call] = CC;
  }

  /// Get the CallingConv that should be used for the specified libcall.
  CallingConv::ID getLibcallCallingConv(RTLIB::Libcall Call) const {
    return LibcallCallingConvs[Call];
  }
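
  // A target shipping its own software-float routines might configure these
  // hooks as follows (sketch; the routine name is hypothetical):
  //
  //   setLibcallName(RTLIB::ADD_F64, "__my_adddf3");
  //   setLibcallCallingConv(RTLIB::ADD_F64, CallingConv::C);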

private:
  const TargetMachine &TM;
  const DataLayout *TD;
  const TargetLoweringObjectFile &TLOF;

  /// The type to use for pointers for the default address space, usually i32 or
  /// i64.
  MVT PointerTy;

  /// True if this is a little endian target.
  bool IsLittleEndian;

  /// Tells the code generator not to expand operations into sequences that use
  /// the select operations if possible.
  bool SelectIsExpensive;

  /// Tells the code generator not to expand integer divides by constants into a
  /// sequence of muls, adds, and shifts. This is a hack until a real cost
  /// model is in place. If we ever optimize for size, this will be set to true
  /// unconditionally.
  bool IntDivIsCheap;

  /// Tells the code generator to bypass slow divide or remainder
  /// instructions. For example, BypassSlowDivWidths[32,8] tells the code
  /// generator to bypass 32-bit integer div/rem with an 8-bit unsigned integer
  /// div/rem when the operands are positive and less than 256.
  DenseMap <unsigned int, unsigned int> BypassSlowDivWidths;

  /// Tells the code generator that it shouldn't generate srl/add/sra for a
  /// signed divide by power of two, and let the target handle it.
  bool Pow2DivIsCheap;

  /// Tells the code generator that it shouldn't generate extra flow control
  /// instructions and should attempt to combine flow control instructions via
  /// predication.
  bool JumpIsExpensive;

  /// This target prefers to use _setjmp to implement llvm.setjmp.
  ///
  /// Defaults to false.
  bool UseUnderscoreSetJmp;

  /// This target prefers to use _longjmp to implement llvm.longjmp.
  ///
  /// Defaults to false.
  bool UseUnderscoreLongJmp;

  /// Whether the target can generate code for jumptables. If it's not true,
  /// then each jumptable must be lowered into if-then-else's.
  bool SupportJumpTables;

  /// Number of blocks threshold to use jump tables.
  int MinimumJumpTableEntries;

  /// Information about the contents of the high-bits in boolean values held in
  /// a type wider than i1. See getBooleanContents.
  BooleanContent BooleanContents;

  /// Information about the contents of the high-bits in boolean vector values
  /// when the element type is wider than i1. See getBooleanContents.
  BooleanContent BooleanVectorContents;

  /// The target scheduling preference: shortest possible total cycles or lowest
  /// register usage.
  Sched::Preference SchedPreferenceInfo;

  /// The size, in bytes, of the target's jmp_buf buffers.
  unsigned JumpBufSize;

  /// The alignment, in bytes, of the target's jmp_buf buffers.
  unsigned JumpBufAlignment;

  /// The minimum alignment that any argument on the stack needs to have.
  unsigned MinStackArgumentAlignment;

  /// The minimum function alignment (used when optimizing for size, and to
  /// prevent explicitly provided alignment from leading to incorrect code).
  unsigned MinFunctionAlignment;

  /// The preferred function alignment (used when alignment unspecified and
  /// optimizing for speed).
  unsigned PrefFunctionAlignment;

  /// The preferred loop alignment.
  unsigned PrefLoopAlignment;

  /// Whether the DAG builder should automatically insert fences and reduce
  /// ordering for atomics. (This will be set for most architectures with weak
  /// memory ordering.)
  bool InsertFencesForAtomic;

  /// If set to a physical register, this specifies the register that
  /// llvm.savestack/llvm.restorestack should save and restore.
  unsigned StackPointerRegisterToSaveRestore;

  /// If set to a physical register, this specifies the register that receives
  /// the exception address on entry to a landing pad.
  unsigned ExceptionPointerRegister;

  /// If set to a physical register, this specifies the register that receives
  /// the exception typeid on entry to a landing pad.
  unsigned ExceptionSelectorRegister;

  /// This indicates the default register class to use for each ValueType the
  /// target supports natively.
  const TargetRegisterClass *RegClassForVT[MVT::LAST_VALUETYPE];
  unsigned char NumRegistersForVT[MVT::LAST_VALUETYPE];
  MVT RegisterTypeForVT[MVT::LAST_VALUETYPE];

  /// This indicates the "representative" register class to use for each
  /// ValueType the target supports natively. This information is used by the
  /// scheduler to track register pressure. By default, the representative
  /// register class is the largest legal super-reg register class of the
  /// register class of the specified type. e.g. On x86, i8, i16, and i32's
  /// representative class would be GR32.
  const TargetRegisterClass *RepRegClassForVT[MVT::LAST_VALUETYPE];

  /// This indicates the "cost" of the "representative" register class for each
  /// ValueType. The cost is used by the scheduler to approximate register
  /// pressure.
  uint8_t RepRegClassCostForVT[MVT::LAST_VALUETYPE];
1419 /// For any value types we are promoting or expanding, this contains the value
1420 /// type that we are changing to. For Expanded types, this contains one step
1421 /// of the expand (e.g. i64 -> i32), even if there are multiple steps required
1422 /// (e.g. i64 -> i16). For types natively supported by the system, this holds
1423 /// the same type (e.g. i32 -> i32).
1424 MVT TransformToType[MVT::LAST_VALUETYPE];
1426 /// For each operation and each value type, keep a LegalizeAction that
1427 /// indicates how instruction selection should deal with the operation. Most
1428 /// operations are Legal (aka, supported natively by the target), but
1429 /// operations that are not should be described. Note that operations on
1430 /// non-legal value types are not described here.
1431 uint8_t OpActions[MVT::LAST_VALUETYPE][ISD::BUILTIN_OP_END];
1433 /// For each load extension type and each value type, keep a LegalizeAction
1434 /// that indicates how instruction selection should deal with a load of a
1435 /// specific value type and extension type.
1436 uint8_t LoadExtActions[MVT::LAST_VALUETYPE][ISD::LAST_LOADEXT_TYPE];
1438 /// For each value type pair keep a LegalizeAction that indicates whether a
1439 /// truncating store of a specific value type and truncating type is legal.
1440 uint8_t TruncStoreActions[MVT::LAST_VALUETYPE][MVT::LAST_VALUETYPE];

  /// For each indexed mode and each value type, keep a pair of LegalizeActions
  /// that indicate how instruction selection should deal with the load /
  /// store.
  ///
  /// The first dimension is the value_type for the reference. The second
  /// dimension represents the various modes for load store.
  uint8_t IndexedModeActions[MVT::LAST_VALUETYPE][ISD::LAST_INDEXED_MODE];

  /// For each condition code (ISD::CondCode) keep a LegalizeAction that
  /// indicates how instruction selection should deal with the condition code.
  ///
  /// Because each CC action takes up 2 bits, we need to have the array size be
  /// large enough to fit all of the value types. This can be done by dividing
  /// MVT::LAST_VALUETYPE by 32 and adding one.
  uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];
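
  // Sizing sketch: at 2 bits per action, one uint64_t holds the actions for 32
  // value types, so e.g. 60 simple value types need ceil(60/32) = 2 words per
  // condition code; "(MVT::LAST_VALUETYPE / 32) + 1" may over-allocate by one
  // word but is always sufficient. (The count of 60 is illustrative.)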

  ValueTypeActionImpl ValueTypeActions;

  /// Return a LegalizeKind describing the action needed to make the given EVT
  /// legal, together with the type the value should be converted to.
  LegalizeKind getTypeConversion(LLVMContext &Context, EVT VT) const {
    // If this is a simple type, use the ComputeRegisterProp mechanism.
    if (VT.isSimple()) {
      MVT SVT = VT.getSimpleVT();
      assert((unsigned)SVT.SimpleTy < array_lengthof(TransformToType));
      MVT NVT = TransformToType[SVT.SimpleTy];
      LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);

      assert((LA == TypeLegal ||
              ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
             && "Promote may not follow Expand or Promote");

      if (LA == TypeSplitVector)
        return LegalizeKind(LA, EVT::getVectorVT(Context,
                                                 SVT.getVectorElementType(),
                                                 SVT.getVectorNumElements()/2));
      if (LA == TypeScalarizeVector)
        return LegalizeKind(LA, SVT.getVectorElementType());
      return LegalizeKind(LA, NVT);
    }

    // Handle Extended Scalar Types.
    if (!VT.isVector()) {
      assert(VT.isInteger() && "Float types must be simple");
      unsigned BitSize = VT.getSizeInBits();
      // First promote to a power-of-two size, then expand if necessary.
      if (BitSize < 8 || !isPowerOf2_32(BitSize)) {
        EVT NVT = VT.getRoundIntegerType(Context);
        assert(NVT != VT && "Unable to round integer VT");
        LegalizeKind NextStep = getTypeConversion(Context, NVT);
        // Avoid multi-step promotion.
        if (NextStep.first == TypePromoteInteger) return NextStep;
        // Return rounded integer type.
        return LegalizeKind(TypePromoteInteger, NVT);
      }

      return LegalizeKind(TypeExpandInteger,
                          EVT::getIntegerVT(Context, VT.getSizeInBits()/2));
    }

    // Handle vector types.
    unsigned NumElts = VT.getVectorNumElements();
    EVT EltVT = VT.getVectorElementType();

    // Vectors with only one element are always scalarized.
    if (NumElts == 1)
      return LegalizeKind(TypeScalarizeVector, EltVT);

    // Try to widen vector elements until the element type is a power of two and
    // promote it to a legal type later on, for example:
    // <3 x i8> -> <4 x i8> -> <4 x i32>
    if (EltVT.isInteger()) {
      // Vectors with a number of elements that is not a power of two are always
      // widened, for example <3 x i8> -> <4 x i8>.
      if (!VT.isPow2VectorType()) {
        NumElts = (unsigned)NextPowerOf2(NumElts);
        EVT NVT = EVT::getVectorVT(Context, EltVT, NumElts);
        return LegalizeKind(TypeWidenVector, NVT);
      }

      // Examine the element type.
      LegalizeKind LK = getTypeConversion(Context, EltVT);

      // If type is to be expanded, split the vector.
      //  <4 x i140> -> <2 x i140>
      if (LK.first == TypeExpandInteger)
        return LegalizeKind(TypeSplitVector,
                            EVT::getVectorVT(Context, EltVT, NumElts / 2));

      // Promote the integer element types until a legal vector type is found
      // or until the element integer type is too big. If a legal type was not
      // found, fallback to the usual mechanism of widening/splitting the
      // vector.
      EVT OldEltVT = EltVT;
      while (1) {
        // Increase the bitwidth of the element to the next pow-of-two
        // (which is greater than 8 bits).
        EltVT = EVT::getIntegerVT(Context, 1 + EltVT.getSizeInBits()
                                  ).getRoundIntegerType(Context);

        // Stop trying when getting a non-simple element type.
        // Note that vector elements may be greater than legal vector element
        // types. Example: X86 XMM registers hold 64bit element on 32bit
        // systems.
        if (!EltVT.isSimple()) break;

        // Build a new vector type and check if it is legal.
        MVT NVT = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
        // Found a legal promoted vector type.
        if (NVT != MVT() && ValueTypeActions.getTypeAction(NVT) == TypeLegal)
          return LegalizeKind(TypePromoteInteger,
                              EVT::getVectorVT(Context, EltVT, NumElts));
      }

      // Reset the type to the unexpanded type if we did not find a legal vector
      // type with a promoted vector element type.
      EltVT = OldEltVT;
    }

    // Try to widen the vector until a legal type is found.
    // If there is no wider legal type, split the vector.
    while (1) {
      // Round up to the next power of 2.
      NumElts = (unsigned)NextPowerOf2(NumElts);

      // If there is no simple vector type with this many elements then there
      // cannot be a larger legal vector type. Note that this assumes that
      // there are no skipped intermediate vector types in the simple types.
      if (!EltVT.isSimple()) break;
      MVT LargerVector = MVT::getVectorVT(EltVT.getSimpleVT(), NumElts);
      if (LargerVector == MVT()) break;

      // If this type is legal then widen the vector.
      if (ValueTypeActions.getTypeAction(LargerVector) == TypeLegal)
        return LegalizeKind(TypeWidenVector, LargerVector);
    }

    // Widen odd vectors to next power of two.
    if (!VT.isPow2VectorType()) {
      EVT NVT = VT.getPow2VectorType(Context);
      return LegalizeKind(TypeWidenVector, NVT);
    }

    // Vectors with illegal element types are expanded.
    EVT NVT = EVT::getVectorVT(Context, EltVT, VT.getVectorNumElements() / 2);
    return LegalizeKind(TypeSplitVector, NVT);
  }
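
  // Usage sketch: on a hypothetical target whose widest legal integer type is
  // i32, this returns (TypeExpandInteger, i32) for MVT::i64 (one halving step)
  // and (TypePromoteInteger, i32) for an extended type such as i21, which is
  // first rounded up to the next power-of-two width.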

  std::vector<std::pair<MVT, const TargetRegisterClass*> > AvailableRegClasses;

  /// Targets can specify ISD nodes that they would like PerformDAGCombine
  /// callbacks for by calling setTargetDAGCombine(), which sets a bit in this
  /// array.
  unsigned char
    TargetDAGCombineArray[(ISD::BUILTIN_OP_END+CHAR_BIT-1)/CHAR_BIT];

  /// For operations that must be promoted to a specific type, this holds the
  /// destination type. This map should be sparse, so don't hold it as an
  /// array.
  ///
  /// Targets add entries to this map with AddPromotedToType(..), clients access
  /// this with getTypeToPromoteTo(..).
  std::map<std::pair<unsigned, MVT::SimpleValueType>, MVT::SimpleValueType>
    PromoteToType;

  /// Stores the name of each libcall.
  const char *LibcallRoutineNames[RTLIB::UNKNOWN_LIBCALL];

  /// The ISD::CondCode that should be used to test the result of each of the
  /// comparison libcall against zero.
  ISD::CondCode CmpLibcallCCs[RTLIB::UNKNOWN_LIBCALL];

  /// Stores the CallingConv that should be used for each libcall.
  CallingConv::ID LibcallCallingConvs[RTLIB::UNKNOWN_LIBCALL];
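
  // Configuration sketch: targets fill these tables in their constructors
  // with the protected setters; the routine name below is hypothetical:
  //
  //   setLibcallName(RTLIB::SDIV_I64, "__my_divdi3");
  //   setLibcallCallingConv(RTLIB::SDIV_I64, CallingConv::C);
  //   setCmpLibcallCC(RTLIB::OEQ_F32, ISD::SETEQ);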

  /// \brief Specify maximum number of store instructions per memset call.
  ///
  /// When lowering \@llvm.memset this field specifies the maximum number of
  /// store operations that may be substituted for the call to memset. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memset will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 9 bytes on a 32-bit machine
  /// with 16-bit alignment would result in four 2-byte stores and one 1-byte
  /// store. This only applies to setting a constant array of a constant size.
  unsigned MaxStoresPerMemset;

  /// Maximum number of store operations that may be substituted for the call
  /// to memset, used for functions with OptSize attribute.
  unsigned MaxStoresPerMemsetOptSize;

  /// \brief Specify maximum number of store instructions per memcpy call.
  ///
  /// When lowering \@llvm.memcpy this field specifies the maximum number of
  /// store operations that may be substituted for a call to memcpy. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memcpy will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, storing 7 bytes on a 32-bit machine
  /// with 32-bit alignment would result in one 4-byte store, one 2-byte store
  /// and one 1-byte store. This only applies to copying a constant array of
  /// constant size.
  unsigned MaxStoresPerMemcpy;

  /// Maximum number of store operations that may be substituted for a call to
  /// memcpy, used for functions with OptSize attribute.
  unsigned MaxStoresPerMemcpyOptSize;

  /// \brief Specify maximum number of store instructions per memmove call.
  ///
  /// When lowering \@llvm.memmove this field specifies the maximum number of
  /// store instructions that may be substituted for a call to memmove. Targets
  /// must set this value based on the cost threshold for that target. Targets
  /// should assume that the memmove will be done using as many of the largest
  /// store operations first, followed by smaller ones, if necessary, per
  /// alignment restrictions. For example, moving 9 bytes on a 32-bit machine
  /// with 8-bit alignment would result in nine 1-byte stores. This only
  /// applies to copying a constant array of constant size.
  unsigned MaxStoresPerMemmove;

  /// Maximum number of store instructions that may be substituted for a call
  /// to memmove, used for functions with OptSize attribute.
  unsigned MaxStoresPerMemmoveOptSize;
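
  // Tuning sketch: a target constructor typically just assigns these fields;
  // the numbers below are illustrative, not recommendations:
  //
  //   MaxStoresPerMemset  = 16;  MaxStoresPerMemsetOptSize  = 8;
  //   MaxStoresPerMemcpy  = 8;   MaxStoresPerMemcpyOptSize  = 4;
  //   MaxStoresPerMemmove = 8;   MaxStoresPerMemmoveOptSize = 4;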

  /// Tells the code generator that select is more expensive than a branch if
  /// the branch is usually predicted right.
  bool PredictableSelectIsExpensive;

  /// Return true if the value types that can be represented by the specified
  /// register class are all legal.
  bool isLegalRC(const TargetRegisterClass *RC) const;
};

/// This class defines information used to lower LLVM code to legal SelectionDAG
/// operators that the target instruction selector can accept natively.
///
/// This class also defines callbacks that targets must implement to lower
/// target-specific constructs to SelectionDAG operators.
class TargetLowering : public TargetLoweringBase {
  TargetLowering(const TargetLowering&) LLVM_DELETED_FUNCTION;
  void operator=(const TargetLowering&) LLVM_DELETED_FUNCTION;

public:
  /// NOTE: The constructor takes ownership of TLOF.
  explicit TargetLowering(const TargetMachine &TM,
                          const TargetLoweringObjectFile *TLOF);

  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if the node's address can be legally represented as
  /// pre-indexed load / store address.
  virtual bool getPreIndexedAddressParts(SDNode * /*N*/, SDValue &/*Base*/,
                                         SDValue &/*Offset*/,
                                         ISD::MemIndexedMode &/*AM*/,
                                         SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Returns true by value, base pointer and offset pointer and addressing mode
  /// by reference if this node can be combined with a load / store to form a
  /// post-indexed load / store.
  virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
                                          SDValue &/*Base*/,
                                          SDValue &/*Offset*/,
                                          ISD::MemIndexedMode &/*AM*/,
                                          SelectionDAG &/*DAG*/) const {
    return false;
  }

  /// Return the entry encoding for a jump table in the current function. The
  /// returned value is a member of the MachineJumpTableInfo::JTEntryKind enum.
  virtual unsigned getJumpTableEncoding() const;

  virtual const MCExpr *
  LowerCustomJumpTableEntry(const MachineJumpTableInfo * /*MJTI*/,
                            const MachineBasicBlock * /*MBB*/, unsigned /*uid*/,
                            MCContext &/*Ctx*/) const {
    llvm_unreachable("Need to implement this hook if target has custom JTIs");
  }

  /// Returns relocation base for the given PIC jumptable.
  virtual SDValue getPICJumpTableRelocBase(SDValue Table,
                                           SelectionDAG &DAG) const;

  /// This returns the relocation base for the given PIC jumptable, the same as
  /// getPICJumpTableRelocBase, but as an MCExpr.
  virtual const MCExpr *
  getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
                               unsigned JTI, MCContext &Ctx) const;

  /// Return true if folding a constant offset with the given GlobalAddress is
  /// legal. It is frequently not legal in PIC relocation models.
  virtual bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const;

  /// Return true if the given node is in a legal tail call position within its
  /// function; if so, Chain is set to the input chain of the prospective tail
  /// call.
  bool isInTailCallPosition(SelectionDAG &DAG, SDNode *Node,
                            SDValue &Chain) const;

  /// Soften the operands of a floating-point comparison that must be lowered
  /// to a libcall, updating the operands and condition code in place. This is
  /// shared by the BR_CC, SELECT_CC, and SETCC lowering paths.
  void softenSetCCOperands(SelectionDAG &DAG, EVT VT,
                           SDValue &NewLHS, SDValue &NewRHS,
                           ISD::CondCode &CCCode, SDLoc DL) const;

  /// Returns a pair of (return value, chain).
  std::pair<SDValue, SDValue> makeLibCall(SelectionDAG &DAG, RTLIB::Libcall LC,
                                          EVT RetVT, const SDValue *Ops,
                                          unsigned NumOps, bool isSigned,
                                          SDLoc dl, bool doesNotReturn = false,
                                          bool isReturnValueUsed = true) const;
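
  // Usage sketch (operand values and types are illustrative): expanding a
  // 128-bit multiply into a call to the MUL_I128 runtime routine:
  //
  //   SDValue Ops[2] = { LHS, RHS };
  //   std::pair<SDValue, SDValue> CallInfo =
  //       makeLibCall(DAG, RTLIB::MUL_I128, MVT::i128, Ops, 2,
  //                   /*isSigned=*/true, dl);
  //   // CallInfo.first is the result, CallInfo.second the output chain.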

  //===--------------------------------------------------------------------===//
  // TargetLowering Optimization Methods
  //

  /// A convenience struct that encapsulates a DAG, and two SDValues for
  /// returning information from TargetLowering to its clients that want to
  /// combine.
  struct TargetLoweringOpt {
    SelectionDAG &DAG;
    bool LegalTys;
    bool LegalOps;
    SDValue Old;
    SDValue New;

    explicit TargetLoweringOpt(SelectionDAG &InDAG,
                               bool LT, bool LO) :
      DAG(InDAG), LegalTys(LT), LegalOps(LO) {}

    bool LegalTypes() const { return LegalTys; }
    bool LegalOperations() const { return LegalOps; }

    bool CombineTo(SDValue O, SDValue N) {
      Old = O;
      New = N;
      return true;
    }

    /// Check to see if the specified operand of the specified instruction is a
    /// constant integer. If so, check to see if there are any bits set in the
    /// constant that are not demanded. If so, shrink the constant and return
    /// true so that the caller can possibly use it.
    bool ShrinkDemandedConstant(SDValue Op, const APInt &Demanded);

    /// Convert x+y to (VT)((SmallVT)x+(SmallVT)y) if the casts are free. This
    /// uses isZExtFree and ZERO_EXTEND for the widening cast, but it could be
    /// generalized for targets with other types of implicit widening casts.
    bool ShrinkDemandedOp(SDValue Op, unsigned BitWidth, const APInt &Demanded,
                          SDLoc dl);
  };

  /// Look at Op. At this point, we know that only the DemandedMask bits of the
  /// result of Op are ever used downstream. If we can use this information to
  /// simplify Op, create a new simplified DAG node and return true, returning
  /// the original and new nodes in Old and New. Otherwise, analyze the
  /// expression and return a mask of KnownOne and KnownZero bits for the
  /// expression (used to simplify the caller). The KnownZero/One bits may only
  /// be accurate for those bits in the DemandedMask.
  bool SimplifyDemandedBits(SDValue Op, const APInt &DemandedMask,
                            APInt &KnownZero, APInt &KnownOne,
                            TargetLoweringOpt &TLO, unsigned Depth = 0) const;

  /// Determine which of the bits specified in Mask are known to be either zero
  /// or one and return them in the KnownZero/KnownOne bitsets.
  virtual void computeMaskedBitsForTargetNode(const SDValue Op,
                                              APInt &KnownZero,
                                              APInt &KnownOne,
                                              const SelectionDAG &DAG,
                                              unsigned Depth = 0) const;

  /// This method can be implemented by targets that want to expose additional
  /// information about sign bits to the DAG Combiner.
  virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                                   unsigned Depth = 0) const;

  struct DAGCombinerInfo {
    void *DC;  // The DAG Combiner object.
    CombineLevel Level;
    bool CalledByLegalizer;
    SelectionDAG &DAG;

    DAGCombinerInfo(SelectionDAG &dag, CombineLevel level, bool cl, void *dc)
      : DC(dc), Level(level), CalledByLegalizer(cl), DAG(dag) {}

    bool isBeforeLegalize() const { return Level == BeforeLegalizeTypes; }
    bool isBeforeLegalizeOps() const { return Level < AfterLegalizeVectorOps; }
    bool isAfterLegalizeVectorOps() const {
      return Level == AfterLegalizeDAG;
    }
    CombineLevel getDAGCombineLevel() { return Level; }
    bool isCalledByLegalizer() const { return CalledByLegalizer; }

    void AddToWorklist(SDNode *N);
    void RemoveFromWorklist(SDNode *N);
    SDValue CombineTo(SDNode *N, const std::vector<SDValue> &To,
                      bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res, bool AddTo = true);
    SDValue CombineTo(SDNode *N, SDValue Res0, SDValue Res1, bool AddTo = true);

    void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
  };

  /// Try to simplify a setcc built with the specified operands and cc. If it is
  /// unable to simplify it, return a null SDValue.
  SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
                        ISD::CondCode Cond, bool foldBooleans,
                        DAGCombinerInfo &DCI, SDLoc dl) const;

  /// Returns true (and the GlobalValue and the offset) if the node is a
  /// GlobalAddress + offset.
  virtual bool
  isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;

  /// This method will be invoked for all target nodes and for any
  /// target-independent nodes that the target has registered with
  /// setTargetDAGCombine().
  ///
  /// The semantics are as follows:
  /// Return Value:
  ///   SDValue.Val == 0 - No change was made
  ///   SDValue.Val == N - N was replaced, is dead, and is already handled.
  ///   otherwise        - N should be replaced by the returned Operand.
  ///
  /// In addition, methods provided by DAGCombinerInfo may be used to perform
  /// more complex transformations.
  virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
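
  // Override sketch for a hypothetical MyTargetLowering; the fold itself is
  // elided, only the return-value protocol above is demonstrated
  // (tryMyAddFold is a hypothetical helper):
  //
  //   SDValue MyTargetLowering::PerformDAGCombine(SDNode *N,
  //                                               DAGCombinerInfo &DCI) const {
  //     if (N->getOpcode() == ISD::ADD && DCI.isBeforeLegalize())
  //       if (SDValue Folded = tryMyAddFold(N, DCI))
  //         return Folded;   // N should be replaced by Folded
  //     return SDValue();    // SDValue.Val == 0: no change was made
  //   }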

  /// Return true if the target has native support for the specified value type
  /// and it is 'desirable' to use the type for the given node type. e.g. On x86
  /// i16 is legal, but undesirable since i16 instruction encodings are longer
  /// and some i16 instructions are slow.
  virtual bool isTypeDesirableForOp(unsigned /*Opc*/, EVT VT) const {
    // By default, assume all legal types are desirable.
    return isTypeLegal(VT);
  }

  /// Return true if it is profitable for dag combiner to transform a floating
  /// point op of specified opcode to an equivalent op of an integer
  /// type. e.g. f32 load -> i32 load can be profitable on ARM.
  virtual bool isDesirableToTransformToIntegerOp(unsigned /*Opc*/,
                                                 EVT /*VT*/) const {
    return false;
  }

  /// This method queries the target whether it is beneficial for dag combiner
  /// to promote the specified node. If true, it should return the desired
  /// promotion type by reference.
  virtual bool IsDesirableToPromoteOp(SDValue /*Op*/, EVT &/*PVT*/) const {
    return false;
  }

  //===--------------------------------------------------------------------===//
  // Lowering methods - These methods must be implemented by targets so that
  // the SelectionDAGBuilder code knows how to lower these.
  //

  /// This hook must be implemented to lower the incoming (formal) arguments,
  /// described by the Ins array, into the specified DAG. The implementation
  /// should fill in the InVals array with legal-type argument values, and
  /// return the resulting token chain value.
  virtual SDValue
    LowerFormalArguments(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                         bool /*isVarArg*/,
                         const SmallVectorImpl<ISD::InputArg> &/*Ins*/,
                         SDLoc /*dl*/, SelectionDAG &/*DAG*/,
                         SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  struct ArgListEntry {
    SDValue Node;
    Type* Ty;
    bool isSExt     : 1;
    bool isZExt     : 1;
    bool isInReg    : 1;
    bool isSRet     : 1;
    bool isNest     : 1;
    bool isByVal    : 1;
    bool isReturned : 1;
    uint16_t Alignment;

    ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
      isSRet(false), isNest(false), isByVal(false), isReturned(false),
      Alignment(0) { }
  };
  typedef std::vector<ArgListEntry> ArgListTy;

  /// This structure contains all information that is necessary for lowering
  /// calls. It is passed to TLI::LowerCallTo when the SelectionDAG builder
  /// needs to lower a call, and targets will see this struct in their LowerCall
  /// implementation.
  struct CallLoweringInfo {
    SDValue Chain;
    Type *RetTy;
    bool RetSExt           : 1;
    bool RetZExt           : 1;
    bool IsVarArg          : 1;
    bool IsInReg           : 1;
    bool DoesNotReturn     : 1;
    bool IsReturnValueUsed : 1;

    // IsTailCall should be modified by implementations of
    // TargetLowering::LowerCall that perform tail call conversions.
    bool IsTailCall;

    unsigned NumFixedArgs;
    CallingConv::ID CallConv;
    SDValue Callee;
    ArgListTy &Args;
    SelectionDAG &DAG;
    SDLoc DL;
    ImmutableCallSite *CS;
    SmallVector<ISD::OutputArg, 32> Outs;
    SmallVector<SDValue, 32> OutVals;
    SmallVector<ISD::InputArg, 32> Ins;

    /// Constructs a call lowering context based on the ImmutableCallSite \p cs.
    CallLoweringInfo(SDValue chain, Type *retTy,
                     FunctionType *FTy, bool isTailCall, SDValue callee,
                     ArgListTy &args, SelectionDAG &dag, SDLoc dl,
                     ImmutableCallSite &cs)
      : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
        RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
        IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
        DoesNotReturn(cs.doesNotReturn()),
        IsReturnValueUsed(!cs.getInstruction()->use_empty()),
        IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
        CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
        DL(dl), CS(&cs) {}

    /// Constructs a call lowering context based on the provided call
    /// information.
    CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
                     bool isVarArg, bool isInReg, unsigned numFixedArgs,
                     CallingConv::ID callConv, bool isTailCall,
                     bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
                     ArgListTy &args, SelectionDAG &dag, SDLoc dl)
      : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
        IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
        IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
        NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
        Args(args), DAG(dag), DL(dl), CS(NULL) {}
  };

  /// This function lowers an abstract call to a function into an actual call.
  /// This returns a pair of operands. The first element is the return value
  /// for the function (if RetTy is not VoidTy). The second element is the
  /// outgoing token chain. It calls LowerCall to do the actual lowering.
  std::pair<SDValue, SDValue> LowerCallTo(CallLoweringInfo &CLI) const;

  /// This hook must be implemented to lower calls into the specified
  /// DAG. The outgoing arguments to the call are described by the Outs array,
  /// and the values to be returned by the call are described by the Ins
  /// array. The implementation should fill in the InVals array with legal-type
  /// return values from the call, and return the resulting token chain value.
  virtual SDValue
    LowerCall(CallLoweringInfo &/*CLI*/,
              SmallVectorImpl<SDValue> &/*InVals*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Target-specific cleanup for formal ByVal parameters.
  virtual void HandleByVal(CCState *, unsigned &, unsigned) const {}

  /// This hook should be implemented to check whether the return values
  /// described by the Outs array can fit into the return registers. If false
  /// is returned, an sret-demotion is performed.
  virtual bool CanLowerReturn(CallingConv::ID /*CallConv*/,
                              MachineFunction &/*MF*/, bool /*isVarArg*/,
                              const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                              LLVMContext &/*Context*/) const
  {
    // Return true by default to get preexisting behavior.
    return true;
  }

  /// This hook must be implemented to lower outgoing return values, described
  /// by the Outs array, into the specified DAG. The implementation should
  /// return the resulting token chain value.
  virtual SDValue
    LowerReturn(SDValue /*Chain*/, CallingConv::ID /*CallConv*/,
                bool /*isVarArg*/,
                const SmallVectorImpl<ISD::OutputArg> &/*Outs*/,
                const SmallVectorImpl<SDValue> &/*OutVals*/,
                SDLoc /*dl*/, SelectionDAG &/*DAG*/) const {
    llvm_unreachable("Not Implemented");
  }

  /// Return true if result of the specified node is used by a return node
  /// only. It also computes and returns the input chain for the tail call.
  ///
  /// This is used to determine whether it is possible to codegen a libcall as
  /// tail call at legalization time.
  virtual bool isUsedByReturnOnly(SDNode *, SDValue &/*Chain*/) const {
    return false;
  }

  /// Return true if the target may be able to emit the call instruction as a
  /// tail call. This is used by optimization passes to determine if it's
  /// profitable to duplicate return instructions to enable tailcall
  /// optimization.
  virtual bool mayBeEmittedAsTailCall(CallInst *) const {
    return false;
  }

  /// Return the type that should be used to zero or sign extend a
  /// zeroext/signext integer argument or return value. FIXME: Most C calling
  /// conventions require the return type to be promoted, but this is not true
  /// all the time, e.g. i1 on x86-64. It is also not necessary for non-C
  /// calling conventions. The frontend should handle this and include all of
  /// the necessary information.
  virtual MVT getTypeForExtArgOrReturn(MVT VT,
                                       ISD::NodeType /*ExtendKind*/) const {
    MVT MinVT = getRegisterType(MVT::i32);
    return VT.bitsLT(MinVT) ? MinVT : VT;
  }

  /// This callback is invoked by the type legalizer to legalize nodes with an
  /// illegal operand type but legal result types. It replaces the
  /// LowerOperation callback in the type Legalizer. The reason we cannot do
  /// away with LowerOperation entirely is that LegalizeDAG isn't yet ready to
  /// use this callback.
  ///
  /// TODO: Consider merging with ReplaceNodeResults.
  ///
  /// The target places new result values for the node in Results (their number
  /// and types must exactly match those of the original return values of
  /// the node), or leaves Results empty, which indicates that the node is not
  /// to be custom lowered after all.
  ///
  /// The default implementation calls LowerOperation.
  virtual void LowerOperationWrapper(SDNode *N,
                                     SmallVectorImpl<SDValue> &Results,
                                     SelectionDAG &DAG) const;

  /// This callback is invoked for operations that are unsupported by the
  /// target, which are registered to use 'custom' lowering, and whose defined
  /// values are all legal. If the target has no operations that require custom
  /// lowering, it need not implement this. The default implementation of this
  /// aborts.
  virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
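
  // Override sketch for a hypothetical target: dispatch on the opcodes that
  // were registered as Custom (LowerMyGlobalAddress is a hypothetical helper):
  //
  //   SDValue MyTargetLowering::LowerOperation(SDValue Op,
  //                                            SelectionDAG &DAG) const {
  //     switch (Op.getOpcode()) {
  //     case ISD::GlobalAddress: return LowerMyGlobalAddress(Op, DAG);
  //     default: llvm_unreachable("custom lowering not registered for op");
  //     }
  //   }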

  /// This callback is invoked when a node result type is illegal for the
  /// target, and the operation was registered to use 'custom' lowering for that
  /// result type. The target places new result values for the node in Results
  /// (their number and types must exactly match those of the original return
  /// values of the node), or leaves Results empty, which indicates that the
  /// node is not to be custom lowered after all.
  ///
  /// If the target has no operations that require custom lowering, it need not
  /// implement this. The default implementation aborts.
  virtual void ReplaceNodeResults(SDNode * /*N*/,
                                  SmallVectorImpl<SDValue> &/*Results*/,
                                  SelectionDAG &/*DAG*/) const {
    llvm_unreachable("ReplaceNodeResults not implemented for this target!");
  }

  /// This method returns the name of a target specific DAG node.
  virtual const char *getTargetNodeName(unsigned Opcode) const;

  /// This method returns a target specific FastISel object, or null if the
  /// target does not support "fast" ISel.
  virtual FastISel *createFastISel(FunctionLoweringInfo &,
                                   const TargetLibraryInfo *) const {
    return 0;
  }

  //===--------------------------------------------------------------------===//
  // Inline Asm Support hooks
  //

  /// This hook allows the target to expand an inline asm call to be explicit
  /// llvm code if it wants to. This is useful for turning simple inline asms
  /// into LLVM intrinsics, which gives the compiler more information about the
  /// behavior of the code.
  virtual bool ExpandInlineAsm(CallInst *) const {
    return false;
  }

  enum ConstraintType {
    C_Register,            // Constraint represents specific register(s).
    C_RegisterClass,       // Constraint represents any of register(s) in class.
    C_Memory,              // Memory constraint.
    C_Other,               // Something else.
    C_Unknown              // Unsupported constraint.
  };

  enum ConstraintWeight {
    // Generic weights.
    CW_Invalid  = -1,     // No match.
    CW_Okay     = 0,      // Acceptable.
    CW_Good     = 1,      // Good weight.
    CW_Better   = 2,      // Better weight.
    CW_Best     = 3,      // Best weight.

    // Well-known weights.
    CW_SpecificReg  = CW_Okay,    // Specific register operands.
    CW_Register     = CW_Good,    // Register operands.
    CW_Memory       = CW_Better,  // Memory operands.
    CW_Constant     = CW_Best,    // Constant operand.
    CW_Default      = CW_Okay     // Default or don't know type.
  };

  /// This contains information for each constraint that we are lowering.
  struct AsmOperandInfo : public InlineAsm::ConstraintInfo {
    /// This contains the actual string for the code, like "m". TargetLowering
    /// picks the 'best' code from ConstraintInfo::Codes that most closely
    /// matches the operand.
    std::string ConstraintCode;

    /// Information about the constraint code, e.g. Register, RegisterClass,
    /// Memory, Other, Unknown.
    TargetLowering::ConstraintType ConstraintType;

    /// If this is the result output operand or a clobber, this is null,
    /// otherwise it is the incoming operand to the CallInst. This gets
    /// modified as the asm is processed.
    Value *CallOperandVal;

    /// The ValueType for the operand value.
    MVT ConstraintVT;

    /// Return true if this is an input operand that is a matching constraint
    /// like "4".
    bool isMatchingInputConstraint() const;

    /// If this is an input matching constraint, this method returns the output
    /// operand it matches.
    unsigned getMatchedOperand() const;

    /// Copy constructor for copying from an AsmOperandInfo.
    AsmOperandInfo(const AsmOperandInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintCode(info.ConstraintCode),
        ConstraintType(info.ConstraintType),
        CallOperandVal(info.CallOperandVal),
        ConstraintVT(info.ConstraintVT) {
    }

    /// Constructor for creating an AsmOperandInfo from a ConstraintInfo.
    AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
      : InlineAsm::ConstraintInfo(info),
        ConstraintType(TargetLowering::C_Unknown),
        CallOperandVal(0), ConstraintVT(MVT::Other) {
    }
  };

  typedef std::vector<AsmOperandInfo> AsmOperandInfoVector;

  /// Split up the constraint string from the inline assembly value into the
  /// specific constraints and their prefixes, and also tie in the associated
  /// operand values. If this returns an empty vector, and if the constraint
  /// string itself isn't empty, there was an error parsing.
  virtual AsmOperandInfoVector ParseConstraints(ImmutableCallSite CS) const;

  /// Examine constraint type and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getMultipleConstraintMatchWeight(
      AsmOperandInfo &info, int maIndex) const;

  /// Examine constraint string and operand type and determine a weight value.
  /// The operand object must already have been set up with the operand type.
  virtual ConstraintWeight getSingleConstraintMatchWeight(
      AsmOperandInfo &info, const char *constraint) const;

  /// Determines the constraint code and constraint type to use for the specific
  /// AsmOperandInfo, setting OpInfo.ConstraintCode and OpInfo.ConstraintType.
  /// If the actual operand being passed in is available, it can be passed in as
  /// Op, otherwise an empty SDValue can be passed.
  virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
                                      SDValue Op,
                                      SelectionDAG *DAG = 0) const;

  /// Given a constraint, return the type of constraint it is for this target.
  virtual ConstraintType getConstraintType(const std::string &Constraint) const;
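
  // Override sketch: a target that supports an extra single-letter constraint
  // (the letter 'Z' here is hypothetical) classifies it and defers everything
  // else to the base implementation:
  //
  //   TargetLowering::ConstraintType
  //   MyTargetLowering::getConstraintType(const std::string &Constraint) const {
  //     if (Constraint.size() == 1 && Constraint[0] == 'Z')
  //       return C_Other;
  //     return TargetLowering::getConstraintType(Constraint);
  //   }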

  /// Given a physical register constraint (e.g. {edx}), return the register
  /// number and the register class for the register.
  ///
  /// Given a register class constraint, like 'r', if this corresponds directly
  /// to an LLVM register class, return a register of 0 and the register class
  /// matching the constraint.
  ///
  /// This should only be used for C_Register constraints. On error, this
  /// returns a register number of 0 and a null register class pointer.
  virtual std::pair<unsigned, const TargetRegisterClass*>
    getRegForInlineAsmConstraint(const std::string &Constraint,
                                 MVT VT) const;

  /// Try to replace an X constraint, which matches anything, with another that
  /// has more specific requirements based on the type of the corresponding
  /// operand. This returns null if there is no replacement to make.
  virtual const char *LowerXConstraint(EVT ConstraintVT) const;

  /// Lower the specified operand into the Ops vector. If it is invalid, don't
  /// add anything to Ops.
  virtual void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                            std::vector<SDValue> &Ops,
                                            SelectionDAG &DAG) const;

  //===--------------------------------------------------------------------===//
  // Div utility functions
  //

  /// Given an exact SDIV by a constant, create a multiplication with the
  /// multiplicative inverse of the constant.
  SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
                         SelectionDAG &DAG) const;

  /// Given an ISD::SDIV node expressing a divide by constant, return a DAG
  /// expression that implements it as a multiply by a magic number.
  SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    std::vector<SDNode*> *Created) const;

  /// Given an ISD::UDIV node expressing a divide by constant, return a DAG
  /// expression that implements it as a multiply by a magic number.
  SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
                    std::vector<SDNode*> *Created) const;

  //===--------------------------------------------------------------------===//
  // Instruction Emitting Hooks
  //

  /// This method should be implemented by targets that mark instructions with
  /// the 'usesCustomInserter' flag. These instructions are special in various
  /// ways, which require special support to insert. The specified MachineInstr
  /// is created but not inserted into any basic blocks, and this method is
  /// called to expand it into a sequence of instructions, potentially also
  /// creating new basic blocks and control flow.
  virtual MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;

  /// This method should be implemented by targets that mark instructions with
  /// the 'hasPostISelHook' flag. These instructions must be adjusted after
  /// instruction selection by target hooks. e.g. To fill in optional defs for
  /// ARM 's' setting instructions.
  virtual void
  AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
};

/// Given an LLVM IR type and return type attributes, compute the return value
/// EVTs and flags, and optionally also the offsets, if the return value is
/// being lowered to memory.
void GetReturnInfo(Type* ReturnType, AttributeSet attr,
                   SmallVectorImpl<ISD::OutputArg> &Outs,
                   const TargetLowering &TLI);

} // end llvm namespace

#endif