#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/IR/Attributes.h"
+#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
-#include "llvm/Support/CallSite.h"
+#include "llvm/IR/IRBuilder.h"
+#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Target/TargetCallingConv.h"
#include "llvm/Target/TargetMachine.h"
#include <climits>
class MachineFunction;
class MachineInstr;
class MachineJumpTableInfo;
+ class Mangler;
class MCContext;
class MCExpr;
+ class MCSymbol;
template<typename T> class SmallVectorImpl;
class DataLayout;
class TargetRegisterClass;
public:
const TargetMachine &getTargetMachine() const { return TM; }
- const DataLayout *getDataLayout() const { return TD; }
+ const DataLayout *getDataLayout() const { return DL; }
const TargetLoweringObjectFile &getObjFileLowering() const { return TLOF; }
bool isBigEndian() const { return !IsLittleEndian; }
bool isLittleEndian() const { return IsLittleEndian; }
- // Return the pointer type for the given address space, defaults to
- // the pointer type from the data layout.
- // FIXME: The default needs to be removed once all the code is updated.
+
+ /// Return the pointer type for the given address space, defaults to
+ /// the pointer type from the data layout.
+ /// FIXME: The default needs to be removed once all the code is updated.
virtual MVT getPointerTy(uint32_t /*AS*/ = 0) const;
unsigned getPointerSizeInBits(uint32_t AS = 0) const;
unsigned getPointerTypeSizeInBits(Type *Ty) const;
return true;
}
- /// Return true if a vector of the given type should be split
- /// (TypeSplitVector) instead of promoted (TypePromoteInteger) during type
- /// legalization.
- virtual bool shouldSplitVectorElementType(EVT /*VT*/) const { return false; }
+ /// Return true if multiple condition registers are available.
+ bool hasMultipleConditionRegisters() const {
+ return HasMultipleConditionRegisters;
+ }
+
+ /// Return true if the target has BitExtract instructions.
+ bool hasExtractBitsInsn() const { return HasExtractBitsInsn; }
+
+ /// Return the preferred vector type legalization action.
+ virtual TargetLoweringBase::LegalizeTypeAction
+ getPreferredVectorAction(EVT VT) const {
+ // The default action for one element vectors is to scalarize.
+ if (VT.getVectorNumElements() == 1)
+ return TypeScalarizeVector;
+ // The default action for other vectors is to promote.
+ return TypePromoteInteger;
+ }
+
+ // There are two general methods for expanding a BUILD_VECTOR node:
+ // 1. Use SCALAR_TO_VECTOR on the defined scalar values and then shuffle
+ // them together.
+ // 2. Build the vector on the stack and then load it.
+ // If this function returns true, then method (1) will be used, subject to
+ // the constraint that all of the necessary shuffles are legal (as determined
+ // by isShuffleMaskLegal). If this function returns false, then method (2) is
+ // always used. The vector type, and the number of defined values, are
+ // provided.
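+ // For example, with method (1) a vector with two defined elements a and b
+ // can be built roughly as (illustrative sketch; the exact shuffle mask
+ // depends on where the defined elements land):
+ //   t0 = SCALAR_TO_VECTOR a
+ //   t1 = SCALAR_TO_VECTOR b
+ //   result = VECTOR_SHUFFLE t0, t1, <0, N, undef, ...>
+ // rather than being spilled to a stack slot and reloaded.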
+ virtual bool
+ shouldExpandBuildVectorWithShuffles(EVT /* VT */,
+ unsigned DefinedValues) const {
+ return DefinedValues < 3;
+ }
/// Return true if integer divide is usually cheaper than a sequence of
/// several shifts, adds, and multiplies for this target.
return PredictableSelectIsExpensive;
}
+ /// Return true if the following transform is beneficial:
+ ///   fold (conv (load x)) -> (load (conv*)x)
+ /// On architectures that don't natively support some vector loads
+ /// efficiently, casting the load to a smaller vector of larger types and
+ /// loading is more efficient. However, this can be undone by optimizations
+ /// in the DAG combiner.
+ virtual bool isLoadBitCastBeneficial(EVT /* Load */, EVT /* Bitcast */) const {
+ return true;
+ }
+
+ /// \brief Return true if the target supports combining a
+ /// chain like:
+ /// \code
+ /// %andResult = and %val1, #imm-with-one-bit-set;
+ /// %icmpResult = icmp %andResult, 0
+ /// br i1 %icmpResult, label %dest1, label %dest2
+ /// \endcode
+ /// into a single machine instruction of a form like:
+ /// \code
+ /// brOnBitSet %register, #bitNumber, dest
+ /// \endcode
+ bool isMaskAndBranchFoldingLegal() const {
+ return MaskAndBranchFoldingIsLegal;
+ }
+
/// Return the ValueType of the result of SETCC operations. Also used to
/// obtain the target's preferred type for the condition operand of SELECT and
/// BRCOND nodes. In the case of BRCOND the argument passed is MVT::Other
/// selects between the two kinds. For example on X86 a scalar boolean should
/// be zero extended from i1, while the elements of a vector of booleans
/// should be sign extended from i1.
- BooleanContent getBooleanContents(bool isVec) const {
- return isVec ? BooleanVectorContents : BooleanContents;
+ ///
+ /// Some cpus also treat floating point types the same way as they treat
+ /// vectors instead of the way they treat scalars.
+ BooleanContent getBooleanContents(bool isVec, bool isFloat) const {
+ if (isVec)
+ return BooleanVectorContents;
+ return isFloat ? BooleanFloatContents : BooleanContents;
+ }
+
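+ /// Convenience overload that selects between the scalar, vector and floating
+ /// point variants based on the properties of \p Type.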
+ BooleanContent getBooleanContents(EVT Type) const {
+ return getBooleanContents(Type.isVector(), Type.isFloatingPoint());
}
/// Return target scheduling preference.
bool isTypeLegal(EVT VT) const {
assert(!VT.isSimple() ||
(unsigned)VT.getSimpleVT().SimpleTy < array_lengthof(RegClassForVT));
- return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != 0;
+ return VT.isSimple() && RegClassForVT[VT.getSimpleVT().SimpleTy] != nullptr;
}
class ValueTypeActionImpl {
public:
ValueTypeActionImpl() {
- std::fill(ValueTypeActions, array_endof(ValueTypeActions), 0);
+ std::fill(std::begin(ValueTypeActions), std::end(ValueTypeActions), 0);
}
LegalizeTypeAction getTypeAction(MVT VT) const {
/// Return how this load with extension should be treated: either it is legal,
/// needs to be promoted to a larger size, needs to be expanded to some other
/// code sequence, or the target has a custom expander for it.
- LegalizeAction getLoadExtAction(unsigned ExtType, MVT VT) const {
- assert(ExtType < ISD::LAST_LOADEXT_TYPE && VT < MVT::LAST_VALUETYPE &&
+ LegalizeAction getLoadExtAction(unsigned ExtType, EVT VT) const {
+ if (VT.isExtended()) return Expand;
+ unsigned I = (unsigned) VT.getSimpleVT().SimpleTy;
+ assert(ExtType < ISD::LAST_LOADEXT_TYPE && I < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- return (LegalizeAction)LoadExtActions[VT.SimpleTy][ExtType];
+ return (LegalizeAction)LoadExtActions[I][ExtType];
}
/// Return true if the specified load with extension is legal on this target.
/// Return how this store with truncation should be treated: either it is
/// legal, needs to be promoted to a larger size, needs to be expanded to some
/// other code sequence, or the target has a custom expander for it.
- LegalizeAction getTruncStoreAction(MVT ValVT, MVT MemVT) const {
- assert(ValVT < MVT::LAST_VALUETYPE && MemVT < MVT::LAST_VALUETYPE &&
+ LegalizeAction getTruncStoreAction(EVT ValVT, EVT MemVT) const {
+ if (ValVT.isExtended() || MemVT.isExtended()) return Expand;
+ unsigned ValI = (unsigned) ValVT.getSimpleVT().SimpleTy;
+ unsigned MemI = (unsigned) MemVT.getSimpleVT().SimpleTy;
+ assert(ValI < MVT::LAST_VALUETYPE && MemI < MVT::LAST_VALUETYPE &&
"Table isn't big enough!");
- return (LegalizeAction)TruncStoreActions[ValVT.SimpleTy]
- [MemVT.SimpleTy];
+ return (LegalizeAction)TruncStoreActions[ValI][MemI];
}
/// Return true if the specified store with truncation is legal on this
LegalizeAction
getCondCodeAction(ISD::CondCode CC, MVT VT) const {
assert((unsigned)CC < array_lengthof(CondCodeActions) &&
- (unsigned)VT.SimpleTy < sizeof(CondCodeActions[0])*4 &&
+ ((unsigned)VT.SimpleTy >> 4) < array_lengthof(CondCodeActions[0]) &&
"Table isn't big enough!");
- /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 64bit
- /// value and the upper 27 bits index into the second dimension of the
- /// array to select what 64bit value to use.
- LegalizeAction Action = (LegalizeAction)
- ((CondCodeActions[CC][VT.SimpleTy >> 5] >> (2*(VT.SimpleTy & 0x1F))) & 3);
+ // See setCondCodeAction for how this is encoded.
+ uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
+ uint32_t Value = CondCodeActions[CC][VT.SimpleTy >> 4];
+ LegalizeAction Action = (LegalizeAction) ((Value >> Shift) & 0x3);
assert(Action != Promote && "Can't promote condition code!");
return Action;
}
/// otherwise it will assert.
EVT getValueType(Type *Ty, bool AllowUnknown = false) const {
// Lower scalar pointers to native pointer types.
- if (Ty->isPointerTy()) return getPointerTy(Ty->getPointerAddressSpace());
+ if (PointerType *PTy = dyn_cast<PointerType>(Ty))
+ return getPointerTy(PTy->getAddressSpace());
if (Ty->isVectorTy()) {
VectorType *VTy = cast<VectorType>(Ty);
Type *Elm = VTy->getElementType();
// Lower vectors of pointers to native pointer types.
- if (Elm->isPointerTy())
- Elm = EVT(PointerTy).getTypeForEVT(Ty->getContext());
+ if (PointerType *PT = dyn_cast<PointerType>(Elm)) {
+ EVT PointerTy(getPointerTy(PT->getAddressSpace()));
+ Elm = PointerTy.getTypeForEVT(Ty->getContext());
+ }
+
return EVT::getVectorVT(Ty->getContext(), EVT::getEVT(Elm, false),
VTy->getNumElements());
}
return getValueType(Ty, AllowUnknown).getSimpleVT();
}
- /// Return the desired alignment for ByVal aggregate function arguments in the
- /// caller parameter area. This is the actual alignment, not its logarithm.
+ /// Return the desired alignment for ByVal or InAlloca aggregate function
+ /// arguments in the caller parameter area. This is the actual alignment, not
+ /// its logarithm.
virtual unsigned getByValTypeAlignment(Type *Ty) const;
/// Return the type of registers that this ValueType will eventually require.
/// reduce runtime.
virtual bool ShouldShrinkFPConstant(EVT) const { return true; }
+ /// When splitting a value of the specified type into parts, does the Lo
+ /// or Hi part come first? This usually follows the endianness, except
+ /// for ppcf128, where the Hi part always comes first.
+ bool hasBigEndianPartOrdering(EVT VT) const {
+ return isBigEndian() || VT == MVT::ppcf128;
+ }
+
/// If true, the target has custom DAG combine transformations that it can
/// perform for the specified node.
bool hasTargetDAGCombine(ISD::NodeType NT) const {
/// \brief Determine if the target supports unaligned memory accesses.
///
- /// This function returns true if the target allows unaligned memory accesses.
- /// of the specified type. If true, it also returns whether the unaligned
- /// memory access is "fast" in the second argument by reference. This is used,
- /// for example, in situations where an array copy/move/set is converted to a
- /// sequence of store operations. It's use helps to ensure that such
- /// replacements don't generate code that causes an alignment error (trap) on
- /// the target machine.
- virtual bool allowsUnalignedMemoryAccesses(EVT, bool * /*Fast*/ = 0) const {
+ /// This function returns true if the target allows unaligned memory accesses
+ /// of the specified type in the given address space. If true, it also returns
+ /// whether the unaligned memory access is "fast" in the last argument by
+ /// reference. This is used, for example, in situations where an array
+ /// copy/move/set is converted to a sequence of store operations. Its use
+ /// helps to ensure that such replacements don't generate code that causes an
+ /// alignment error (trap) on the target machine.
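+ ///
+ /// For example, a target with fast unaligned i32 accesses might implement
+ /// this roughly as follows (illustrative only):
+ /// \code
+ ///   bool allowsMisalignedMemoryAccesses(EVT VT, unsigned, unsigned,
+ ///                                       bool *Fast) const override {
+ ///     if (VT != MVT::i32)
+ ///       return false;
+ ///     if (Fast)
+ ///       *Fast = true; // Unaligned i32 accesses run at full speed.
+ ///     return true;
+ ///   }
+ /// \endcode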
+ virtual bool allowsMisalignedMemoryAccesses(EVT,
+ unsigned AddrSpace = 0,
+ unsigned Align = 1,
+ bool * /*Fast*/ = nullptr) const {
return false;
}
return UseUnderscoreLongJmp;
}
- /// Return whether the target can generate code for jump tables.
- bool supportJumpTables() const {
- return SupportJumpTables;
- }
-
/// Return integer threshold on number of blocks to use jump tables rather
/// than if sequence.
int getMinimumJumpTableEntries() const {
return 0;
}
+ /// Returns true if a cast between SrcAS and DestAS is a noop.
+ virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const {
+ return false;
+ }
+
//===--------------------------------------------------------------------===//
/// \name Helpers for TargetTransformInfo implementations
/// @{
/// @}
+ //===--------------------------------------------------------------------===//
+ /// \name Helpers for load-linked/store-conditional atomic expansion.
+ /// @{
+
+ /// Perform a load-linked operation on Addr, returning a "Value *" with the
+ /// corresponding pointee type. This may entail some non-trivial operations to
+ /// truncate or reconstruct types that will be illegal in the backend. See
+ /// ARMISelLowering for an example implementation.
+ virtual Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
+ AtomicOrdering Ord) const {
+ llvm_unreachable("Load linked unimplemented on this target");
+ }
+
+ /// Perform a store-conditional operation to Addr. Return the status of the
+ /// store. This should be 0 if the store succeeded, non-zero otherwise.
+ virtual Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
+ Value *Addr, AtomicOrdering Ord) const {
+ llvm_unreachable("Store conditional unimplemented on this target");
+ }
+
+ /// Return true if the given (atomic) instruction should be expanded by the
+ /// IR-level AtomicExpandLoadLinked pass into a loop involving
+ /// load-linked/store-conditional pairs. Atomic stores will be expanded in the
+ /// same way as "atomic xchg" operations which ignore their output if needed.
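+ ///
+ /// When expansion is requested, the pass emits a retry loop roughly of the
+ /// following form, built via emitLoadLinked and emitStoreConditional
+ /// (illustrative IR):
+ /// \code
+ /// atomicrmw.start:
+ ///   %loaded = <load-linked> %addr
+ ///   %new = op %loaded, %incr
+ ///   %status = <store-conditional> %new, %addr
+ ///   %try_again = icmp ne i32 %status, 0
+ ///   br i1 %try_again, label %atomicrmw.start, label %atomicrmw.end
+ /// \endcode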
+ virtual bool shouldExpandAtomicInIR(Instruction *Inst) const {
+ return false;
+ }
+
+
//===--------------------------------------------------------------------===//
// TargetLowering Configuration Methods - These methods should be invoked by
// the derived class constructor to configure this object for the target.
virtual void resetOperationActions() {}
protected:
- /// Specify how the target extends the result of a boolean value from i1 to a
- /// wider type. See getBooleanContents.
- void setBooleanContents(BooleanContent Ty) { BooleanContents = Ty; }
+ /// Specify how the target extends the result of integer and floating point
+ /// boolean values from i1 to a wider type. See getBooleanContents.
+ void setBooleanContents(BooleanContent Ty) {
+ BooleanContents = Ty;
+ BooleanFloatContents = Ty;
+ }
+
+ /// Specify how the target extends the result of integer and floating point
+ /// boolean values from i1 to a wider type. See getBooleanContents.
+ void setBooleanContents(BooleanContent IntTy, BooleanContent FloatTy) {
+ BooleanContents = IntTy;
+ BooleanFloatContents = FloatTy;
+ }
/// Specify how the target extends the result of a vector boolean value from a
/// vector of i1 to a wider type. See getBooleanContents.
}
/// Indicate whether this target prefers to use _setjmp to implement
- /// llvm.setjmp or the non _ version. Defaults to false.
+ /// llvm.setjmp or the version without _. Defaults to false.
void setUseUnderscoreSetJmp(bool Val) {
UseUnderscoreSetJmp = Val;
}
/// Indicate whether this target prefers to use _longjmp to implement
- /// llvm.longjmp or the non _ version. Defaults to false.
+ /// llvm.longjmp or the version without _. Defaults to false.
void setUseUnderscoreLongJmp(bool Val) {
UseUnderscoreLongJmp = Val;
}
- /// Indicate whether the target can generate code for jump tables.
- void setSupportJumpTables(bool Val) {
- SupportJumpTables = Val;
- }
-
/// Indicate the number of blocks to generate jump tables rather than if
/// sequence.
void setMinimumJumpTableEntries(int Val) {
SelectIsExpensive = isExpensive;
}
+ /// Tells the code generator that the target has multiple (allocatable)
+ /// condition registers that can be used to store the results of comparisons
+ /// for use by selects and conditional branches. With multiple condition
+ /// registers, the code generator will not aggressively sink comparisons into
+ /// the blocks of their users.
+ void setHasMultipleConditionRegisters(bool hasManyRegs = true) {
+ HasMultipleConditionRegisters = hasManyRegs;
+ }
+
+ /// Tells the code generator that the target has BitExtract instructions.
+ /// The code generator will aggressively sink "shift"s into the blocks of
+ /// their users if the users will generate "and" instructions which can be
+ /// combined with "shift" to BitExtract instructions.
+ void setHasExtractBitsInsn(bool hasExtractInsn = true) {
+ HasExtractBitsInsn = hasExtractInsn;
+ }
+
/// Tells the code generator not to expand sequence of operations into a
/// separate sequences that increases the amount of flow control.
void setJumpIsExpensive(bool isExpensive = true) {
assert(VT < MVT::LAST_VALUETYPE &&
(unsigned)CC < array_lengthof(CondCodeActions) &&
"Table isn't big enough!");
- /// The lower 5 bits of the SimpleTy index into Nth 2bit set from the 64bit
- /// value and the upper 27 bits index into the second dimension of the
- /// array to select what 64bit value to use.
- CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
- &= ~(uint64_t(3UL) << (VT.SimpleTy & 0x1F)*2);
- CondCodeActions[(unsigned)CC][VT.SimpleTy >> 5]
- |= (uint64_t)Action << (VT.SimpleTy & 0x1F)*2;
+ /// The lower 4 bits of the SimpleTy index into the Nth 2-bit set from the
+ /// 32-bit value and the remaining upper bits index into the second dimension
+ /// of the array to select which 32-bit value to use.
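+ /// In other words, each 32-bit entry packs 16 two-bit actions, so the action
+ /// for (CC, VT) lives in CondCodeActions[CC][SimpleTy / 16] at bit offset
+ /// 2 * (SimpleTy % 16).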
+ uint32_t Shift = 2 * (VT.SimpleTy & 0xF);
+ CondCodeActions[CC][VT.SimpleTy >> 4] &= ~((uint32_t)0x3 << Shift);
+ CondCodeActions[CC][VT.SimpleTy >> 4] |= (uint32_t)Action << Shift;
}
/// If Opc/OrigVT is specified as being promoted, the promotion code defaults
int64_t BaseOffs;
bool HasBaseReg;
int64_t Scale;
- AddrMode() : BaseGV(0), BaseOffs(0), HasBaseReg(false), Scale(0) {}
+ AddrMode() : BaseGV(nullptr), BaseOffs(0), HasBaseReg(false), Scale(0) {}
};
/// Return true if the addressing mode represented by AM is legal for this
return true;
}
+ /// Return true if it's significantly cheaper to shift a vector by a uniform
+ /// scalar than by an amount which will vary across each lane. On x86, for
+ /// example, there is a "psllw" instruction for the former case, but no simple
+ /// instruction for a general "a << b" operation on vectors.
+ virtual bool isVectorShiftByScalarCheap(Type *Ty) const {
+ return false;
+ }
+
/// Return true if it's free to truncate a value of type Ty1 to type
/// Ty2. e.g. On x86 it's free to truncate a i32 value in register EAX to i16
/// by referencing its sub-register AX.
return false;
}
+ /// Return true if the target can combine, into a single paired load, two
+ /// loaded values of type LoadedType that lie next to each other in memory.
+ /// RequiredAlignment gives the minimal alignment constraints that must be met
+ /// to be able to select this paired load.
+ ///
+ /// This information is *not* used to generate actual paired loads, but it is
+ /// used to generate a sequence of loads that is easier to combine into a
+ /// paired load.
+ /// For instance, something like this:
+ /// a = load i64* addr
+ /// b = trunc i64 a to i32
+ /// c = lshr i64 a, 32
+ /// d = trunc i64 c to i32
+ /// will be optimized into:
+ /// b = load i32* addr1
+ /// d = load i32* addr2
+ /// Where addr1 = addr2 +/- sizeof(i32).
+ ///
+ /// In other words, unless the target performs a post-isel load combining,
+ /// this information should not be provided because it will generate more
+ /// loads.
+ virtual bool hasPairedLoad(Type * /*LoadedType*/,
+ unsigned & /*RequiredAlignment*/) const {
+ return false;
+ }
+
+ virtual bool hasPairedLoad(EVT /*LoadedType*/,
+ unsigned & /*RequiredAlignment*/) const {
+ return false;
+ }
+
/// Return true if zero-extending the specific node Val to type VT2 is free
/// (either because it's implicitly zero-extended such as ARM ldrb / ldrh or
/// because it's folded such as X86 zero-extending loads).
return false;
}
+ /// \brief Return true if it is beneficial to convert a load of a constant to
+ /// just the constant itself.
+ /// On some targets it might be more efficient to use a combination of
+ /// arithmetic instructions to materialize the constant instead of loading it
+ /// from a constant pool.
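+ /// For example, on some targets a handful of move/insert-immediate
+ /// instructions can materialize a wide constant more cheaply than a load
+ /// from a constant pool (AArch64's MOVZ/MOVK sequences are one such case).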
+ virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const {
+ return false;
+ }
//===--------------------------------------------------------------------===//
// Runtime Library hooks
//
private:
const TargetMachine &TM;
- const DataLayout *TD;
+ const DataLayout *DL;
const TargetLoweringObjectFile &TLOF;
- /// The type to use for pointers for the default address space, usually i32 or
- /// i64.
- MVT PointerTy;
-
/// True if this is a little endian target.
bool IsLittleEndian;
/// the select operations if possible.
bool SelectIsExpensive;
+ /// Tells the code generator that the target has multiple (allocatable)
+ /// condition registers that can be used to store the results of comparisons
+ /// for use by selects and conditional branches. With multiple condition
+ /// registers, the code generator will not aggressively sink comparisons into
+ /// the blocks of their users.
+ bool HasMultipleConditionRegisters;
+
+ /// Tells the code generator that the target has BitExtract instructions.
+ /// The code generator will aggressively sink "shift"s into the blocks of
+ /// their users if the users will generate "and" instructions which can be
+ /// combined with "shift" to BitExtract instructions.
+ bool HasExtractBitsInsn;
+
/// Tells the code generator not to expand integer divides by constants into a
/// sequence of muls, adds, and shifts. This is a hack until a real cost
/// model is in place. If we ever optimize for size, this will be set to true
/// Defaults to false.
bool UseUnderscoreLongJmp;
- /// Whether the target can generate code for jumptables. If it's not true,
- /// then each jumptable must be lowered into if-then-else's.
- bool SupportJumpTables;
-
/// Number of blocks threshold to use jump tables.
int MinimumJumpTableEntries;
/// a type wider than i1. See getBooleanContents.
BooleanContent BooleanContents;
+ /// Information about the contents of the high-bits in boolean values held in
+ /// a floating point type. See getBooleanContents.
+ BooleanContent BooleanFloatContents;
+
/// Information about the contents of the high-bits in boolean vector values
/// when the element type is wider than i1. See getBooleanContents.
BooleanContent BooleanVectorContents;
/// indicates how instruction selection should deal with the condition code.
///
/// Because each CC action takes up 2 bits, we need to have the array size be
- /// large enough to fit all of the value types. This can be done by dividing
- /// the MVT::LAST_VALUETYPE by 32 and adding one.
- uint64_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE / 32) + 1];
+ /// large enough to fit all of the value types. This can be done by rounding
+ /// up the MVT::LAST_VALUETYPE value to the next multiple of 16.
+ uint32_t CondCodeActions[ISD::SETCC_INVALID][(MVT::LAST_VALUETYPE + 15) / 16];
ValueTypeActionImpl ValueTypeActions;
LegalizeTypeAction LA = ValueTypeActions.getTypeAction(SVT);
assert(
- (LA == TypeLegal ||
+ (LA == TypeLegal || LA == TypeSoftenFloat ||
ValueTypeActions.getTypeAction(NVT) != TypePromoteInteger)
&& "Promote may not follow Expand or Promote");
if (NumElts == 1)
return LegalizeKind(TypeScalarizeVector, EltVT);
- // Try to widen vector elements until the element type is a power of two and
+ // Try to widen vector elements until the element type is a power of two and
// promote it to a legal type later on, for example:
// <3 x i8> -> <4 x i8> -> <4 x i32>
if (EltVT.isInteger()) {
// Stop trying when getting a non-simple element type.
// Note that vector elements may be greater than legal vector element
- // types. Example: X86 XMM registers hold 64bit element on 32bit systems.
+ // types. Example: X86 XMM registers hold 64bit element on 32bit
+ // systems.
if (!EltVT.isSimple()) break;
// Build a new vector type and check if it is legal.
/// the branch is usually predicted right.
bool PredictableSelectIsExpensive;
+ /// Indicates whether the target supports folding a mask of a single bit, a
+ /// compare, and a branch into a single instruction.
+ bool MaskAndBranchFoldingIsLegal;
+
protected:
/// Return true if the value types that can be represented by the specified
/// register class are all legal.
bool isLegalRC(const TargetRegisterClass *RC) const;
+
+ /// Replace/modify any TargetFrameIndex operands with a target-dependent
+ /// sequence of memory operands that is recognized by PrologEpilogInserter.
+ MachineBasicBlock *emitPatchPoint(MachineInstr *MI,
+                                   MachineBasicBlock *MBB) const;
};
/// This class defines information used to lower LLVM code to legal SelectionDAG
/// by reference if this node can be combined with a load / store to form a
/// post-indexed load / store.
virtual bool getPostIndexedAddressParts(SDNode * /*N*/, SDNode * /*Op*/,
- SDValue &/*Base*/, SDValue &/*Offset*/,
+ SDValue &/*Base*/,
+ SDValue &/*Offset*/,
ISD::MemIndexedMode &/*AM*/,
SelectionDAG &/*DAG*/) const {
return false;
/// Determine which of the bits specified in Mask are known to be either zero
/// or one and return them in the KnownZero/KnownOne bitsets.
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
+ virtual void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const;
/// This method can be implemented by targets that want to expose additional
/// information about sign bits to the DAG Combiner.
virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+ const SelectionDAG &DAG,
unsigned Depth = 0) const;
struct DAGCombinerInfo {
void CommitTargetLoweringOpt(const TargetLoweringOpt &TLO);
};
+ /// Return true if N is a constant or constant vector equal to the true value
+ /// from getBooleanContents().
+ bool isConstTrueVal(const SDNode *N) const;
+
+ /// Return true if N is a constant or constant vector equal to the false value
+ /// from getBooleanContents().
+ bool isConstFalseVal(const SDNode *N) const;
+
/// Try to simplify a setcc built with the specified operands and cc. If it is
/// unable to simplify it, return a null SDValue.
SDValue SimplifySetCC(EVT VT, SDValue N0, SDValue N1,
///
virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ /// Return true if it is profitable to move a following shift through this
+ /// node, adjusting any immediate operands as necessary to preserve semantics.
+ /// This transformation may not be desirable if it disrupts a particularly
+ /// auspicious target-specific tree (e.g. bitfield extraction in AArch64).
+ /// By default, it returns true.
+ virtual bool isDesirableToCommuteWithShift(const SDNode *N /*Op*/) const {
+ return true;
+ }
+
/// Return true if the target has native support for the specified value type
/// and it is 'desirable' to use the type for the given node type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
bool isSRet : 1;
bool isNest : 1;
bool isByVal : 1;
+ bool isInAlloca : 1;
bool isReturned : 1;
uint16_t Alignment;
ArgListEntry() : isSExt(false), isZExt(false), isInReg(false),
- isSRet(false), isNest(false), isByVal(false), isReturned(false),
- Alignment(0) { }
+ isSRet(false), isNest(false), isByVal(false), isInAlloca(false),
+ isReturned(false), Alignment(0) { }
+
+ void setAttributes(ImmutableCallSite *CS, unsigned AttrIdx);
};
typedef std::vector<ArgListEntry> ArgListTy;
unsigned NumFixedArgs;
CallingConv::ID CallConv;
SDValue Callee;
- ArgListTy &Args;
+ ArgListTy Args;
SelectionDAG &DAG;
SDLoc DL;
ImmutableCallSite *CS;
SmallVector<SDValue, 32> OutVals;
SmallVector<ISD::InputArg, 32> Ins;
+ CallLoweringInfo(SelectionDAG &DAG)
+ : RetTy(nullptr), RetSExt(false), RetZExt(false), IsVarArg(false),
+ IsInReg(false), DoesNotReturn(false), IsReturnValueUsed(true),
+ IsTailCall(false), NumFixedArgs(-1), CallConv(CallingConv::C),
+ DAG(DAG), CS(nullptr) {}
+
+ CallLoweringInfo &setDebugLoc(SDLoc dl) {
+ DL = dl;
+ return *this;
+ }
+
+ CallLoweringInfo &setChain(SDValue InChain) {
+ Chain = InChain;
+ return *this;
+ }
+
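+ /// A typical chained use from a target's call lowering might look like the
+ /// following (illustrative only; the surrounding variables are placeholders):
+ /// \code
+ ///   TargetLowering::CallLoweringInfo CLI(DAG);
+ ///   CLI.setDebugLoc(dl).setChain(Chain)
+ ///      .setCallee(CallConv, RetTy, Callee, std::move(Args))
+ ///      .setTailCall(isTailCall)
+ ///      .setSExtResult(SignExtend).setZExtResult(ZeroExtend);
+ ///   std::pair<SDValue, SDValue> Result = LowerCallTo(CLI);
+ /// \endcode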
+ CallLoweringInfo &setCallee(CallingConv::ID CC, Type *ResultType,
+ SDValue Target, ArgListTy &&ArgsList,
+ unsigned FixedArgs = -1) {
+ RetTy = ResultType;
+ Callee = Target;
+ CallConv = CC;
+ Args = std::move(ArgsList);
+ NumFixedArgs =
+ (FixedArgs == static_cast<unsigned>(-1) ? Args.size() : FixedArgs);
+ return *this;
+ }
+
+ CallLoweringInfo &setCallee(Type *ResultType, FunctionType *FTy,
+ SDValue Target, ArgListTy &&ArgsList,
+ ImmutableCallSite &Call) {
+ RetTy = ResultType;
+
+ IsInReg = Call.paramHasAttr(0, Attribute::InReg);
+ DoesNotReturn = Call.doesNotReturn();
+ IsVarArg = FTy->isVarArg();
+ IsReturnValueUsed = !Call.getInstruction()->use_empty();
+ RetSExt = Call.paramHasAttr(0, Attribute::SExt);
+ RetZExt = Call.paramHasAttr(0, Attribute::ZExt);
+
+ Callee = Target;
+
+ CallConv = Call.getCallingConv();
+ NumFixedArgs = FTy->getNumParams();
+ Args = std::move(ArgsList);
+
+ CS = &Call;
+
+ return *this;
+ }
+
+ CallLoweringInfo &setInRegister(bool Value = true) {
+ IsInReg = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setNoReturn(bool Value = true) {
+ DoesNotReturn = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setVarArg(bool Value = true) {
+ IsVarArg = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setTailCall(bool Value = true) {
+ IsTailCall = Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setDiscardResult(bool Value = true) {
+ IsReturnValueUsed = !Value;
+ return *this;
+ }
+
+ CallLoweringInfo &setSExtResult(bool Value = true) {
+ RetSExt = Value;
+ return *this;
+ }
- /// Constructs a call lowering context based on the ImmutableCallSite \p cs.
- CallLoweringInfo(SDValue chain, Type *retTy,
- FunctionType *FTy, bool isTailCall, SDValue callee,
- ArgListTy &args, SelectionDAG &dag, SDLoc dl,
- ImmutableCallSite &cs)
- : Chain(chain), RetTy(retTy), RetSExt(cs.paramHasAttr(0, Attribute::SExt)),
- RetZExt(cs.paramHasAttr(0, Attribute::ZExt)), IsVarArg(FTy->isVarArg()),
- IsInReg(cs.paramHasAttr(0, Attribute::InReg)),
- DoesNotReturn(cs.doesNotReturn()),
- IsReturnValueUsed(!cs.getInstruction()->use_empty()),
- IsTailCall(isTailCall), NumFixedArgs(FTy->getNumParams()),
- CallConv(cs.getCallingConv()), Callee(callee), Args(args), DAG(dag),
- DL(dl), CS(&cs) {}
-
- /// Constructs a call lowering context based on the provided call
- /// information.
- CallLoweringInfo(SDValue chain, Type *retTy, bool retSExt, bool retZExt,
- bool isVarArg, bool isInReg, unsigned numFixedArgs,
- CallingConv::ID callConv, bool isTailCall,
- bool doesNotReturn, bool isReturnValueUsed, SDValue callee,
- ArgListTy &args, SelectionDAG &dag, SDLoc dl)
- : Chain(chain), RetTy(retTy), RetSExt(retSExt), RetZExt(retZExt),
- IsVarArg(isVarArg), IsInReg(isInReg), DoesNotReturn(doesNotReturn),
- IsReturnValueUsed(isReturnValueUsed), IsTailCall(isTailCall),
- NumFixedArgs(numFixedArgs), CallConv(callConv), Callee(callee),
- Args(args), DAG(dag), DL(dl), CS(NULL) {}
+ CallLoweringInfo &setZExtResult(bool Value = true) {
+ RetZExt = Value;
+ return *this;
+ }
+
+ ArgListTy &getArgs() {
+ return Args;
+ }
};
/// This function lowers an abstract call to a function into an actual call.
return false;
}
+ /// Return the builtin name for the __builtin___clear_cache intrinsic.
+ /// Default is to invoke the clear cache library call.
+ virtual const char * getClearCacheBuiltinName() const {
+ return "__clear_cache";
+ }
+
+ /// Return the register ID of the name passed in. Used by the named register
+ /// global variables extension. There is no target-independent behaviour, so
+ /// the default action is to bail.
+ virtual unsigned getRegisterByName(const char* RegName, EVT VT) const {
+ report_fatal_error("Named registers not implemented for this target");
+ }
+
/// Return the type that should be used to zero or sign extend a
/// zeroext/signext integer argument or return value. FIXME: Most C calling
/// convention requires the return type to be promoted, but this is not true
return VT.bitsLT(MinVT) ? MinVT : VT;
}
+ /// For some targets, an LLVM struct type must be broken down into multiple
+ /// simple types, but the calling convention specifies that the entire struct
+ /// must be passed in a block of consecutive registers.
+ virtual bool
+ functionArgumentNeedsConsecutiveRegisters(Type *Ty, CallingConv::ID CallConv,
+ bool isVarArg) const {
+ return false;
+ }
+
+ /// Returns a 0 terminated array of registers that can be safely used as
+ /// scratch registers.
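+ /// For example (illustrative; the register names are placeholders):
+ /// \code
+ ///   static const MCPhysReg ScratchRegs[] = { REG1, REG2, 0 };
+ ///   return ScratchRegs;
+ /// \endcode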
+ virtual const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const {
+ return nullptr;
+ }
+
+ /// This callback is used to prepare for a volatile or atomic load.
+ /// It takes a chain node as input and returns the chain for the load itself.
+ ///
+ /// Having a callback like this is necessary for targets like SystemZ,
+ /// which allows a CPU to reuse the result of a previous load indefinitely,
+ /// even if a cache-coherent store is performed by another CPU. The default
+ /// implementation does nothing.
+ virtual SDValue prepareVolatileOrAtomicLoad(SDValue Chain, SDLoc DL,
+ SelectionDAG &DAG) const {
+ return Chain;
+ }
+
/// This callback is invoked by the type legalizer to legalize nodes with an
/// illegal operand type but legal result types. It replaces the
/// LowerOperation callback in the type Legalizer. The reason we can not do
/// target does not support "fast" ISel.
virtual FastISel *createFastISel(FunctionLoweringInfo &,
const TargetLibraryInfo *) const {
- return 0;
+ return nullptr;
}
+
+ bool verifyReturnAddressArgumentIsConstant(SDValue Op,
+ SelectionDAG &DAG) const;
+
//===--------------------------------------------------------------------===//
// Inline Asm Support hooks
//
/// operand it matches.
unsigned getMatchedOperand() const;
- /// Copy constructor for copying from an AsmOperandInfo.
- AsmOperandInfo(const AsmOperandInfo &info)
- : InlineAsm::ConstraintInfo(info),
- ConstraintCode(info.ConstraintCode),
- ConstraintType(info.ConstraintType),
- CallOperandVal(info.CallOperandVal),
- ConstraintVT(info.ConstraintVT) {
- }
-
/// Copy constructor for copying from a ConstraintInfo.
AsmOperandInfo(const InlineAsm::ConstraintInfo &info)
: InlineAsm::ConstraintInfo(info),
ConstraintType(TargetLowering::C_Unknown),
- CallOperandVal(0), ConstraintVT(MVT::Other) {
+ CallOperandVal(nullptr), ConstraintVT(MVT::Other) {
}
};
/// Op, otherwise an empty SDValue can be passed.
virtual void ComputeConstraintToUse(AsmOperandInfo &OpInfo,
SDValue Op,
- SelectionDAG *DAG = 0) const;
+ SelectionDAG *DAG = nullptr) const;
/// Given a constraint, return the type of constraint it is for this target.
virtual ConstraintType getConstraintType(const std::string &Constraint) const;
//
SDValue BuildExactSDIV(SDValue Op1, SDValue Op2, SDLoc dl,
SelectionDAG &DAG) const;
- SDValue BuildSDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
- std::vector<SDNode*> *Created) const;
- SDValue BuildUDIV(SDNode *N, SelectionDAG &DAG, bool IsAfterLegalization,
- std::vector<SDNode*> *Created) const;
+ SDValue BuildSDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ bool IsAfterLegalization,
+ std::vector<SDNode *> *Created) const;
+ SDValue BuildUDIV(SDNode *N, const APInt &Divisor, SelectionDAG &DAG,
+ bool IsAfterLegalization,
+ std::vector<SDNode *> *Created) const;
+ virtual SDValue BuildSDIVPow2(SDNode *N, const APInt &Divisor,
+ SelectionDAG &DAG,
+ std::vector<SDNode *> *Created) const {
+ return SDValue();
+ }
+
+ //===--------------------------------------------------------------------===//
+ // Legalization utility functions
+ //
+
+ /// Expand a MUL into two nodes. One that computes the high bits of
+ /// the result and one that computes the low bits.
+ /// \param HiLoVT The value type to use for the Lo and Hi nodes.
+ /// \param LL Low bits of the LHS of the MUL. You can use this parameter
+ /// if you want to control how low bits are extracted from the LHS.
+ /// \param LH High bits of the LHS of the MUL. See LL for meaning.
+ /// \param RL Low bits of the RHS of the MUL. See LL for meaning.
+ /// \param RH High bits of the RHS of the MUL. See LL for meaning.
+ /// \returns true if the node has been expanded, false if it has not.
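+ ///
+ /// For example, an i64 multiply on a target with only i32 multiplies can be
+ /// expanded with HiLoVT == MVT::i32 into a short sequence of i32 multiply
+ /// and add-with-carry style nodes (illustrative; the exact nodes depend on
+ /// what the target makes legal).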
+ bool expandMUL(SDNode *N, SDValue &Lo, SDValue &Hi, EVT HiLoVT,
+ SelectionDAG &DAG, SDValue LL = SDValue(),
+ SDValue LH = SDValue(), SDValue RL = SDValue(),
+ SDValue RH = SDValue()) const;
+
+ /// Expand a float (f32) to signed integer (i64) conversion.
+ /// \param N Node to expand
+ /// \param Result output after conversion
+ /// \returns true if the expansion was successful, false otherwise
+ bool expandFP_TO_SINT(SDNode *N, SDValue &Result, SelectionDAG &DAG) const;
//===--------------------------------------------------------------------===//
// Instruction Emitting Hooks
//
- // This method should be implemented by targets that mark instructions with
- // the 'usesCustomInserter' flag. These instructions are special in various
- // ways, which require special support to insert. The specified MachineInstr
- // is created but not inserted into any basic blocks, and this method is
- // called to expand it into a sequence of instructions, potentially also
- // creating new basic blocks and control flow.
+ /// This method should be implemented by targets that mark instructions with
+ /// the 'usesCustomInserter' flag. These instructions are special in various
+ /// ways, which require special support to insert. The specified MachineInstr
+ /// is created but not inserted into any basic blocks, and this method is
+ /// called to expand it into a sequence of instructions, potentially also
+ /// creating new basic blocks and control flow.
virtual MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI, MachineBasicBlock *MBB) const;
/// ARM 's' setting instructions.
virtual void
AdjustInstrPostInstrSelection(MachineInstr *MI, SDNode *Node) const;
+
+ /// If this function returns true, SelectionDAGBuilder emits a
+ /// LOAD_STACK_GUARD node when it is lowering Intrinsic::stackprotector.
+ virtual bool useLoadStackGuardNode() const {
+ return false;
+ }
};
/// Given an LLVM IR type and return type attributes, compute the return value