X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FTarget%2FX86%2FX86ISelLowering.h;h=90d783697212cd4c121abef264b901e40bff7b9b;hb=d996c5b54bff05047ab628447446d5db470ee59c;hp=6e4bd72532ee7061265f37e570bd4186ea1b2f9b;hpb=f2c9fef815bbe17949c8a6b72ec6f7b17d7f19ed;p=oota-llvm.git diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h index 6e4bd72532e..90d78369721 100644 --- a/lib/Target/X86/X86ISelLowering.h +++ b/lib/Target/X86/X86ISelLowering.h @@ -12,19 +12,18 @@ // //===----------------------------------------------------------------------===// -#ifndef X86ISELLOWERING_H -#define X86ISELLOWERING_H +#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H +#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H -#include "X86MachineFunctionInfo.h" -#include "X86RegisterInfo.h" -#include "X86Subtarget.h" #include "llvm/CodeGen/CallingConvLower.h" -#include "llvm/CodeGen/FastISel.h" #include "llvm/CodeGen/SelectionDAG.h" #include "llvm/Target/TargetLowering.h" #include "llvm/Target/TargetOptions.h" namespace llvm { + class X86Subtarget; + class X86TargetMachine; + namespace X86ISD { // X86 Specific DAG Nodes enum NodeType { @@ -84,6 +83,12 @@ namespace llvm { /// readcyclecounter RDTSC_DAG, + /// X86 Read Time-Stamp Counter and Processor ID. + RDTSCP_DAG, + + /// X86 Read Performance Monitoring Counters. + RDPMC_DAG, + /// X86 compare and logical compare instructions. CMP, COMI, UCOMI, @@ -292,7 +297,6 @@ namespace llvm { ADD, SUB, ADC, SBB, SMUL, INC, DEC, OR, XOR, AND, - BZHI, // BZHI - Zero high bits BEXTR, // BEXTR - Bit field extract UMUL, // LOW, HI, FLAGS = umul LHS, RHS @@ -314,7 +318,12 @@ namespace llvm { KORTEST, // Several flavors of instructions with vector shuffle behaviors. + PACKSS, + PACKUS, + // Intra-lane alignr PALIGNR, + // AVX512 inter-lane alignr + VALIGN, PSHUFD, PSHUFHW, PSHUFLW, @@ -346,6 +355,8 @@ namespace llvm { // PMULUDQ - Vector multiply packed unsigned doubleword integers PMULUDQ, + // PMULUDQ - Vector multiply packed signed doubleword integers + PMULDQ, // FMA nodes FMADD, @@ -397,23 +408,8 @@ namespace llvm { // XTEST - Test if in transactional execution. XTEST, - // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG, - // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG - - // Atomic 64-bit binary operations. - ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE, - ATOMSUB64_DAG, - ATOMOR64_DAG, - ATOMXOR64_DAG, - ATOMAND64_DAG, - ATOMNAND64_DAG, - ATOMMAX64_DAG, - ATOMMIN64_DAG, - ATOMUMAX64_DAG, - ATOMUMIN64_DAG, - ATOMSWAP64_DAG, - // LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap. - LCMPXCHG_DAG, + LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE, LCMPXCHG8_DAG, LCMPXCHG16_DAG, @@ -518,11 +514,21 @@ namespace llvm { /// own arguments. Callee pop is necessary to support tail calls. bool isCalleePop(CallingConv::ID CallingConv, bool is64Bit, bool IsVarArg, bool TailCallOpt); + + /// AVX512 static rounding constants. These need to match the values in + /// avx512fintrin.h. + enum STATIC_ROUNDING { + TO_NEAREST_INT = 0, + TO_NEG_INF = 1, + TO_POS_INF = 2, + TO_ZERO = 3, + CUR_DIRECTION = 4 + }; } //===--------------------------------------------------------------------===// // X86TargetLowering - X86 Implementation of the TargetLowering interface - class X86TargetLowering : public TargetLowering { + class X86TargetLowering final : public TargetLowering { public: explicit X86TargetLowering(X86TargetMachine &TM); @@ -572,10 +578,10 @@ namespace llvm { /// legal as the hook is used before type legalization. 
bool isSafeMemOpType(MVT VT) const override; - /// allowsUnalignedMemoryAccesses - Returns true if the target allows + /// allowsMisalignedMemoryAccesses - Returns true if the target allows /// unaligned memory accesses. of the specified type. Returns whether it /// is "fast" by reference in the second argument. - bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS, + bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align, bool *Fast) const override; /// LowerOperation - Provide custom lowering hooks for some operations. @@ -615,18 +621,19 @@ namespace llvm { /// getSetCCResultType - Return the value type to use for ISD::SETCC. EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override; - /// computeMaskedBitsForTargetNode - Determine which of the bits specified + /// computeKnownBitsForTargetNode - Determine which of the bits specified /// in Mask are known to be either zero or one and return them in the /// KnownZero/KnownOne bitsets. - void computeMaskedBitsForTargetNode(const SDValue Op, - APInt &KnownZero, - APInt &KnownOne, - const SelectionDAG &DAG, - unsigned Depth = 0) const override; + void computeKnownBitsForTargetNode(const SDValue Op, + APInt &KnownZero, + APInt &KnownOne, + const SelectionDAG &DAG, + unsigned Depth = 0) const override; // ComputeNumSignBitsForTargetNode - Determine the number of bits in the // operation that are sign bits. unsigned ComputeNumSignBitsForTargetNode(SDValue Op, + const SelectionDAG &DAG, unsigned Depth) const override; bool isGAPlusOffset(SDNode *N, const GlobalValue* &GA, @@ -680,6 +687,12 @@ namespace llvm { /// the immediate into a register. bool isLegalAddImmediate(int64_t Imm) const override; + /// \brief Return the cost of the scaling factor used in the addressing + /// mode represented by AM for this target, for a load/store + /// of the specified type. + /// If the AM is supported, the return value must be >= 0. + /// If the AM is not supported, it returns a negative value. + int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override; bool isVectorShiftByScalarCheap(Type *Ty) const override; @@ -756,9 +769,7 @@ namespace llvm { /// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine /// for fptoui. - bool isTargetFTOL() const { - return Subtarget->isTargetWindows() && !Subtarget->is64Bit(); - } + bool isTargetFTOL() const; /// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be /// used for fptoui to the given type. @@ -771,6 +782,13 @@ namespace llvm { bool shouldConvertConstantLoadToIntImm(const APInt &Imm, Type *Ty) const override; + /// Intel processors have a unified instruction and data cache + const char * getClearCacheBuiltinName() const override { + return nullptr; // nothing to do, move along. + } + + unsigned getRegisterByName(const char* RegName, EVT VT) const override; + /// createFastISel - This method returns a target specific FastISel object, /// or null if the target does not support "fast" ISel. FastISel *createFastISel(FunctionLoweringInfo &funcInfo, @@ -791,6 +809,10 @@ namespace llvm { /// \brief Reset the operation actions based on target options. void resetOperationActions() override; + bool useLoadStackGuardNode() const override; + /// \brief Customize the preferred legalization strategy for certain types. 
+ LegalizeTypeAction getPreferredVectorAction(EVT VT) const override; + protected: std::pair findRepresentativeClass(MVT VT) const override; @@ -867,8 +889,11 @@ namespace llvm { SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const; SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const; + SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const; SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const; SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const; @@ -904,6 +929,7 @@ namespace llvm { SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const; SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const; + SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const; SDValue LowerFormalArguments(SDValue Chain, @@ -924,7 +950,7 @@ namespace llvm { bool mayBeEmittedAsTailCall(CallInst *CI) const override; - MVT getTypeForExtArgOrReturn(MVT VT, + EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT, ISD::NodeType ExtendKind) const override; bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF, @@ -932,7 +958,7 @@ namespace llvm { const SmallVectorImpl &Outs, LLVMContext &Context) const override; - const uint16_t *getScratchRegisters(CallingConv::ID CC) const override; + const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override; /// Utility function to emit atomic-load-arith operations (and, or, xor, /// nand, max, min, umax, umin). It takes the corresponding instruction to @@ -983,11 +1009,12 @@ namespace llvm { /// Emit nodes that will be selected as "test Op0,Op0", or something /// equivalent, for use with the given x86 condition code. - SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const; + SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl, + SelectionDAG &DAG) const; /// Emit nodes that will be selected as "cmp Op0,Op1", or something /// equivalent, for use with the given x86 condition code. - SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, + SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl, SelectionDAG &DAG) const; /// Convert a comparison if required by the subtarget.
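
Note on the new X86::STATIC_ROUNDING constants added above: the patch states they must match the values in avx512fintrin.h, where they serve as the rounding-control immediates of the AVX-512 rounded intrinsics. The following is a minimal, standalone C++ sketch of that encoding for reference only; it is not part of the patch, and the roundingModeName helper and its printable strings are illustrative assumptions rather than code from the LLVM tree or the compiler headers.

// Illustrative sketch only: mirrors the X86::STATIC_ROUNDING values added
// in this patch. The helper below is a hypothetical debugging aid, not
// code from the LLVM tree or from avx512fintrin.h.
#include <cstdio>

namespace X86 {
  enum STATIC_ROUNDING {
    TO_NEAREST_INT = 0, // round to nearest, ties to even
    TO_NEG_INF     = 1, // round toward -infinity
    TO_POS_INF     = 2, // round toward +infinity
    TO_ZERO        = 3, // truncate toward zero
    CUR_DIRECTION  = 4  // use the rounding mode currently set in MXCSR
  };
}

// Hypothetical helper: map a static-rounding immediate to a printable
// name, e.g. when inspecting an intrinsic's rounding-control operand.
static const char *roundingModeName(unsigned RC) {
  switch (RC) {
  case X86::TO_NEAREST_INT: return "round-to-nearest-even";
  case X86::TO_NEG_INF:     return "round-toward-negative";
  case X86::TO_POS_INF:     return "round-toward-positive";
  case X86::TO_ZERO:        return "round-toward-zero";
  case X86::CUR_DIRECTION:  return "current MXCSR direction";
  default:                  return "invalid rounding control";
  }
}

int main() {
  for (unsigned RC = 0; RC <= 4; ++RC)
    std::printf("%u -> %s\n", RC, roundingModeName(RC));
  return 0;
}

Unlike the first four values, CUR_DIRECTION does not name a fixed direction; it tells the lowering code to leave the rounding mode alone and use whatever MXCSR currently specifies.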