#ifndef X86ISELLOWERING_H
#define X86ISELLOWERING_H
-#include "X86MachineFunctionInfo.h"
-#include "X86RegisterInfo.h"
-#include "X86Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/FastISel.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
namespace llvm {
+ class X86Subtarget;
+ class X86TargetMachine;
+
namespace X86ISD {
// X86 Specific DAG Nodes
enum NodeType {
/// RDTSC_DAG - This operation implements the lowering for
/// readcyclecounter.
RDTSC_DAG,
+ /// X86 Read Time-Stamp Counter and Processor ID.
+ RDTSCP_DAG,
+
/// X86 compare and logical compare instructions.
CMP, COMI, UCOMI,
/// X86 SetCC. Operand 0 is condition code, and operand 1 is the EFLAGS
/// operand, usually produced by a CMP instruction.
SETCC,
+ /// X86 Select
+ SELECT,
+
// Same as SETCC except it's materialized with a sbb and the value is all
// ones or all zeros.
SETCC_CARRY, // R = carry_bit ? ~0 : 0
/// X86 FP SETCC, implemented with CMP{cc}SS/CMP{cc}SD.
/// Operands are two FP values to compare; result is a mask of
/// 0s or 1s. Generally DTRT for C/C++ with NaNs.
- FSETCCss, FSETCCsd,
+ FSETCC,
/// X86 MOVMSK{pd|ps}, extracts sign bits of two or four FP values,
/// result in an integer GPR. Needs masking for scalar result.
/// TC_RETURN - Tail call return. See X86TargetLowering::LowerCall for
/// the list of operands.
TC_RETURN,
- // VZEXT_MOVL - Vector move low and zero extend.
+ // VZEXT_MOVL - Vector move to low scalar and zero higher vector elements.
VZEXT_MOVL,
- // VSEXT_MOVL - Vector move low and sign extend.
- VSEXT_MOVL,
-
// VZEXT - Vector integer zero-extend.
VZEXT,
// VSEXT - Vector integer signed-extend.
VSEXT,
+ // VTRUNC - Vector integer truncate.
+ VTRUNC,
+
+ // VTRUNCM - Vector integer truncate with mask.
+ VTRUNCM,
+
// VFPEXT - Vector FP extend.
VFPEXT,
// PCMP* - Vector integer comparisons.
PCMPEQ, PCMPGT,
- // PCMP*M - Vector integer comparisons, the result is in a mask vector
+ // PCMP*M - Vector integer comparisons, the result is in a mask vector.
PCMPEQM, PCMPGTM,
/// CMPM, CMPMU - Vector comparison generating mask bits for fp and
/// integer signed and unsigned data types.
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
- BLSI, // BLSI - Extract lowest set isolated bit
- BLSMSK, // BLSMSK - Get mask up to lowest set bit
- BLSR, // BLSR - Reset lowest set bit
+ BEXTR, // BEXTR - Bit field extract
UMUL, // LOW, HI, FLAGS = umul LHS, RHS
// MUL_IMM - X86 specific multiply by immediate.
MUL_IMM,
- // PTEST - Vector bitwise comparisons
+ // PTEST - Vector bitwise comparisons.
PTEST,
- // TESTP - Vector packed fp sign bitwise comparisons
+ // TESTP - Vector packed fp sign bitwise comparisons.
TESTP,
+ // TESTM, TESTNM - Vector "test" in AVX-512, the result is in a mask vector.
+ TESTM,
+ TESTNM,
+
// OR/AND test for masks
KORTEST,
- KTEST,
// Several flavors of instructions with vector shuffle behaviors.
PALIGNR,
VPERMILP,
VPERMV,
VPERMV3,
+ VPERMIV3,
VPERMI,
VPERM2X128,
VBROADCAST,
// masked broadcast
VBROADCASTM,
+ // Insert/Extract vector element
+ VINSERT,
+ VEXTRACT,
// PMULUDQ - Vector multiply packed unsigned doubleword integers
PMULUDQ,
+ // PMULDQ - Vector multiply packed signed doubleword integers
+ PMULDQ,
// FMA nodes
FMADD,
//===--------------------------------------------------------------------===//
// X86TargetLowering - X86 Implementation of the TargetLowering interface
- class X86TargetLowering : public TargetLowering {
+ class X86TargetLowering final : public TargetLowering {
public:
explicit X86TargetLowering(X86TargetMachine &TM);
- virtual unsigned getJumpTableEncoding() const;
+ unsigned getJumpTableEncoding() const override;
- virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
+ MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i8; }
- virtual const MCExpr *
+ const MCExpr *
LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
const MachineBasicBlock *MBB, unsigned uid,
- MCContext &Ctx) const;
+ MCContext &Ctx) const override;
/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
- virtual SDValue getPICJumpTableRelocBase(SDValue Table,
- SelectionDAG &DAG) const;
- virtual const MCExpr *
+ SDValue getPICJumpTableRelocBase(SDValue Table,
+ SelectionDAG &DAG) const override;
+ const MCExpr *
getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
- unsigned JTI, MCContext &Ctx) const;
+ unsigned JTI, MCContext &Ctx) const override;
/// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
/// function arguments in the caller parameter area. For X86, aggregates
/// that contain SSE vectors are placed at 16-byte boundaries while the rest
/// are at 4-byte boundaries.
- virtual unsigned getByValTypeAlignment(Type *Ty) const;
+ unsigned getByValTypeAlignment(Type *Ty) const override;
/// getOptimalMemOpType - Returns the target specific optimal type for load
/// and store operations as a result of memset, memcpy, and memmove
/// lowering. 'MemcpyStrSrc' indicates whether the memcpy source is constant
/// so it does not need to be loaded.
/// It returns EVT::Other if the type should be determined using generic
/// target-independent logic.
- virtual EVT
- getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
- bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
- MachineFunction &MF) const;
+ EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+ bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
+ MachineFunction &MF) const override;
/// isSafeMemOpType - Returns true if it's safe to use load / store of the
/// specified type to expand memcpy / memset inline. This is mostly true
/// except for some special cases; for example, on x86 targets without SSE2,
/// f64 load / store are done with fldl / fstpl, which also does type
/// conversion. Note the specified type doesn't have to be
/// legal as the hook is used before type legalization.
- virtual bool isSafeMemOpType(MVT VT) const;
+ bool isSafeMemOpType(MVT VT) const override;
/// allowsUnalignedMemoryAccesses - Returns true if the target allows
/// unaligned memory accesses of the specified type. Returns whether it
/// is "fast" by reference in the second argument.
- virtual bool allowsUnalignedMemoryAccesses(EVT VT, bool *Fast) const;
+ bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+ bool *Fast) const override;
/// LowerOperation - Provide custom lowering hooks for some operations.
///
- virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
/// ReplaceNodeResults - Replace the results of node with an illegal result
/// type with new values built out of custom code.
///
- virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
- SelectionDAG &DAG) const;
+ void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue>&Results,
+ SelectionDAG &DAG) const override;
- virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+ SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
/// isTypeDesirableForOp - Return true if the target has native support for
/// the specified value type and it is 'desirable' to use the type for the
/// given node type. e.g. On x86 i16 is legal, but undesirable since i16
/// instruction encodings are longer and some i16 instructions are slow.
- virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;
+ bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;
/// isTypeDesirable - Return true if the target has native support for the
/// specified value type and it is 'desirable' to use the type. e.g. On x86
/// i16 is legal, but undesirable since i16 instruction encodings are longer
/// and some i16 instructions are slow.
- virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;
+ bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;
- virtual MachineBasicBlock *
+ MachineBasicBlock *
EmitInstrWithCustomInserter(MachineInstr *MI,
- MachineBasicBlock *MBB) const;
+ MachineBasicBlock *MBB) const override;
/// getTargetNodeName - This method returns the name of a target specific
/// DAG node.
- virtual const char *getTargetNodeName(unsigned Opcode) const;
+ const char *getTargetNodeName(unsigned Opcode) const override;
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
- virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+ EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
- /// computeMaskedBitsForTargetNode - Determine which of the bits specified
+ /// computeKnownBitsForTargetNode - Determine which of the bits specified
/// in Mask are known to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
- virtual void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
// ComputeNumSignBitsForTargetNode - Determine the number of bits in the
// operation that are sign bits.
- virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
- unsigned Depth) const;
+ unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+ const SelectionDAG &DAG,
+ unsigned Depth) const override;
- virtual bool
- isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
+ bool isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
+ int64_t &Offset) const override;
SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
- virtual bool ExpandInlineAsm(CallInst *CI) const;
+ bool ExpandInlineAsm(CallInst *CI) const override;
- ConstraintType getConstraintType(const std::string &Constraint) const;
+ ConstraintType
+ getConstraintType(const std::string &Constraint) const override;
/// Examine constraint string and operand type and determine a weight value.
/// The operand object must already have been set up with the operand type.
- virtual ConstraintWeight getSingleConstraintMatchWeight(
- AsmOperandInfo &info, const char *constraint) const;
+ ConstraintWeight
+ getSingleConstraintMatchWeight(AsmOperandInfo &info,
+ const char *constraint) const override;
- virtual const char *LowerXConstraint(EVT ConstraintVT) const;
+ const char *LowerXConstraint(EVT ConstraintVT) const override;
/// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
/// vector. If it is invalid, don't add anything to Ops. If hasMemory is
/// true it means one of the asm constraints of the inline asm instruction
/// being processed is 'm'.
- virtual void LowerAsmOperandForConstraint(SDValue Op,
- std::string &Constraint,
- std::vector<SDValue> &Ops,
- SelectionDAG &DAG) const;
+ void LowerAsmOperandForConstraint(SDValue Op,
+ std::string &Constraint,
+ std::vector<SDValue> &Ops,
+ SelectionDAG &DAG) const override;
/// getRegForInlineAsmConstraint - Given a physical register constraint
/// (e.g. {edx}), return the register number and the register class for the
/// register. On error, this returns a register number of 0.
std::pair<unsigned, const TargetRegisterClass*>
getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT VT) const;
+ MVT VT) const override;
/// isLegalAddressingMode - Return true if the addressing mode represented
/// by AM is legal for this target, for a load/store of the specified type.
- virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
+ bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
/// isLegalICmpImmediate - Return true if the specified immediate is legal
/// icmp immediate, that is the target has icmp instructions which can
/// compare a register against the immediate without having to materialize
/// the immediate into a register.
- virtual bool isLegalICmpImmediate(int64_t Imm) const;
+ bool isLegalICmpImmediate(int64_t Imm) const override;
/// isLegalAddImmediate - Return true if the specified immediate is legal
/// add immediate, that is the target has add instructions which can
/// add a register and the immediate without having to materialize
/// the immediate into a register.
- virtual bool isLegalAddImmediate(int64_t Imm) const;
+ bool isLegalAddImmediate(int64_t Imm) const override;
+
+ /// \brief Return the cost of the scaling factor used in the addressing
+ /// mode represented by AM for this target, for a load/store
+ /// of the specified type.
+ /// If the AM is supported, the return value must be >= 0.
+ /// If the AM is not supported, it returns a negative value.
+ int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;
+
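+ /// Returns true if shifting a vector by a uniform (scalar) amount is
+ /// significantly cheaper than shifting it by a per-lane variable amount.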
+ bool isVectorShiftByScalarCheap(Type *Ty) const override;
/// isTruncateFree - Return true if it's free to truncate a value of
/// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
/// register EAX to i16 by referencing its sub-register AX.
- virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
- virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+ bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+ bool isTruncateFree(EVT VT1, EVT VT2) const override;
- virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+ bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
/// isZExtFree - Return true if any actual instruction that defines a
/// value of type Ty1 implicitly zero-extends the value to Ty2 in the result
/// register. This does not necessarily apply to truncate instructions;
/// e.g. on x86-64, all instructions that define 32-bit values implicitly
/// zero-extend the result out to 64 bits.
- virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
- virtual bool isZExtFree(EVT VT1, EVT VT2) const;
- virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+ bool isZExtFree(Type *Ty1, Type *Ty2) const override;
+ bool isZExtFree(EVT VT1, EVT VT2) const override;
+ bool isZExtFree(SDValue Val, EVT VT2) const override;
/// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
/// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
/// expanded to FMAs when this method returns true, otherwise fmuladd is
/// expanded to fmul + fadd.
- virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
+ bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
/// isNarrowingProfitable - Return true if it's profitable to narrow
/// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
/// from i32 to i8 but not from i32 to i16.
- virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;
+ bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
/// isFPImmLegal - Returns true if the target can instruction select the
/// specified FP immediate natively. If false, the legalizer will
/// materialize the FP immediate as a load from a constant pool.
- virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+ bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask
/// values are assumed to be legal.
- virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
- EVT VT) const;
+ bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
+ EVT VT) const override;
/// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. Targets can use
/// this to indicate if there is a suitable VECTOR_SHUFFLE that can be used
/// to replace a VAND with a constant pool entry.
- virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
- EVT VT) const;
+ bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
+ EVT VT) const override;
/// ShouldShrinkFPConstant - If true, then instruction selection should
/// seek to shrink the FP constant of the specified type to a smaller type
/// in order to save space and / or reduce runtime.
- virtual bool ShouldShrinkFPConstant(EVT VT) const {
+ bool ShouldShrinkFPConstant(EVT VT) const override {
// Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
// expensive than a straight movsd. On the other hand, it's important to
// shrink long double fp constant since fldt is very slow.
/// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
/// for fptoui.
- bool isTargetFTOL() const {
- return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
- }
+ bool isTargetFTOL() const;
/// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
/// used for fptoui to the given type.
bool isIntegerTypeFTOL(EVT VT) const {
return isTargetFTOL() && VT == MVT::i64;
}
+ /// \brief Returns true if it is beneficial to convert a load of a constant
+ /// to just the constant itself.
+ bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+ Type *Ty) const override;
+
+ /// Intel processors have a unified instruction and data cache
+ const char * getClearCacheBuiltinName() const override {
+ return nullptr; // nothing to do, move along.
+ }
+
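+ /// Returns the register number for the register with the given name, as
+ /// used by the named-register global variables extension.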
+ unsigned getRegisterByName(const char* RegName, EVT VT) const override;
+
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
- virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
- const TargetLibraryInfo *libInfo) const;
+ FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+ const TargetLibraryInfo *libInfo) const override;
/// getStackCookieLocation - Return true if the target stores stack
/// protector cookies at a fixed offset in some non-standard address
/// space, and populates the address space and offset as
/// appropriate.
- virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;
+ bool getStackCookieLocation(unsigned &AddressSpace,
+ unsigned &Offset) const override;
SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
SelectionDAG &DAG) const;
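+ /// Returns true if a cast from address space SrcAS to DestAS is a no-op,
+ /// i.e. it does not change the bit representation of the pointer.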
+ bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
+
/// \brief Reset the operation actions based on target options.
- virtual void resetOperationActions();
+ void resetOperationActions() override;
protected:
std::pair<const TargetRegisterClass*, uint8_t>
- findRepresentativeClass(MVT VT) const;
+ findRepresentativeClass(MVT VT) const override;
private:
/// Subtarget - Keep a pointer to the X86Subtarget around so that we can
/// make the right decision when generating code for different targets.
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
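+ // Helpers for lowering extract/insert of a single i1 element of an
+ // AVX-512 mask vector.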
+ SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const;
+ SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const;
+
SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerExternalSymbol(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerShiftParts(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerUINT_TO_FP_i64(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_SINT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFP_TO_UINT(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFABS(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFNEG(SDValue Op, SelectionDAG &DAG) const;
- SDValue LowerFCOPYSIGN(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerToBT(SDValue And, ISD::CondCode CC,
SDLoc dl, SelectionDAG &DAG) const;
SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
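+ // Lower i128 operations (such as division and remainder) that are
+ // expanded to runtime library calls on Win64.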
+ SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
- virtual SDValue
+ SDValue
LowerFormalArguments(SDValue Chain,
CallingConv::ID CallConv, bool isVarArg,
const SmallVectorImpl<ISD::InputArg> &Ins,
SDLoc dl, SelectionDAG &DAG,
- SmallVectorImpl<SDValue> &InVals) const;
- virtual SDValue
- LowerCall(CallLoweringInfo &CLI,
- SmallVectorImpl<SDValue> &InVals) const;
+ SmallVectorImpl<SDValue> &InVals) const override;
+ SDValue LowerCall(CallLoweringInfo &CLI,
+ SmallVectorImpl<SDValue> &InVals) const override;
- virtual SDValue
- LowerReturn(SDValue Chain,
- CallingConv::ID CallConv, bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- const SmallVectorImpl<SDValue> &OutVals,
- SDLoc dl, SelectionDAG &DAG) const;
+ SDValue LowerReturn(SDValue Chain,
+ CallingConv::ID CallConv, bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ const SmallVectorImpl<SDValue> &OutVals,
+ SDLoc dl, SelectionDAG &DAG) const override;
- virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
+ bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
- virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
+ bool mayBeEmittedAsTailCall(CallInst *CI) const override;
- virtual MVT
- getTypeForExtArgOrReturn(MVT VT, ISD::NodeType ExtendKind) const;
+ MVT getTypeForExtArgOrReturn(MVT VT,
+ ISD::NodeType ExtendKind) const override;
- virtual bool
- CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
- bool isVarArg,
- const SmallVectorImpl<ISD::OutputArg> &Outs,
- LLVMContext &Context) const;
+ bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+ bool isVarArg,
+ const SmallVectorImpl<ISD::OutputArg> &Outs,
+ LLVMContext &Context) const override;
+
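+ /// Returns a null-terminated list of registers that can safely be used
+ /// as scratch registers for the given calling convention.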
+ const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
/// Utility function to emit atomic-load-arith operations (and, or, xor,
/// nand, max, min, umax, umin). It takes the corresponding instruction to
/// expand, the associated machine basic block, and the associated X86
/// opcodes for reg/reg and reg/imm.
MachineBasicBlock *emitEHSjLjLongJmp(MachineInstr *MI,
MachineBasicBlock *MBB) const;
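+ // Custom-inserter helper for FMA3 instructions, invoked from
+ // EmitInstrWithCustomInserter.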
+ MachineBasicBlock *emitFMA3Instr(MachineInstr *MI,
+ MachineBasicBlock *MBB) const;
+
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
+ SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
+ SelectionDAG &DAG) const;
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
+ SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl,
SelectionDAG &DAG) const;
/// Convert a comparison if required by the subtarget.