diff --git a/lib/Target/X86/X86ISelLowering.h b/lib/Target/X86/X86ISelLowering.h
index ce9594ae3ed..9f51b53a00c 100644
--- a/lib/Target/X86/X86ISelLowering.h
+++ b/lib/Target/X86/X86ISelLowering.h
@@ -15,16 +15,15 @@
 #ifndef X86ISELLOWERING_H
 #define X86ISELLOWERING_H
 
-#include "X86MachineFunctionInfo.h"
-#include "X86RegisterInfo.h"
 #include "X86Subtarget.h"
 #include "llvm/CodeGen/CallingConvLower.h"
-#include "llvm/CodeGen/FastISel.h"
 #include "llvm/CodeGen/SelectionDAG.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetOptions.h"
 
 namespace llvm {
+  class X86TargetMachine;
+
   namespace X86ISD {
     // X86 Specific DAG Nodes
     enum NodeType {
@@ -84,6 +83,9 @@ namespace llvm {
       /// readcyclecounter
       RDTSC_DAG,
 
+      /// X86 Read Time-Stamp Counter and Processor ID.
+      RDTSCP_DAG,
+
       /// X86 compare and logical compare instructions.
      CMP, COMI, UCOMI,
 
@@ -292,7 +294,6 @@ namespace llvm {
       ADD, SUB, ADC, SBB, SMUL,
       INC, DEC, OR, XOR, AND,
 
-      BZHI,   // BZHI - Zero high bits
       BEXTR,  // BEXTR - Bit field extract
 
       UMUL, // LOW, HI, FLAGS = umul LHS, RHS
@@ -346,6 +347,8 @@ namespace llvm {
 
       // PMULUDQ - Vector multiply packed unsigned doubleword integers
       PMULUDQ,
+      // PMULDQ - Vector multiply packed signed doubleword integers
+      PMULDQ,
 
       // FMA nodes
       FMADD,
@@ -522,32 +525,32 @@ namespace llvm {
   //===--------------------------------------------------------------------===//
   //  X86TargetLowering - X86 Implementation of the TargetLowering interface
-  class X86TargetLowering : public TargetLowering {
+  class X86TargetLowering final : public TargetLowering {
   public:
     explicit X86TargetLowering(X86TargetMachine &TM);
 
-    virtual unsigned getJumpTableEncoding() const;
+    unsigned getJumpTableEncoding() const override;
 
-    virtual MVT getScalarShiftAmountTy(EVT LHSTy) const { return MVT::i8; }
+    MVT getScalarShiftAmountTy(EVT LHSTy) const override { return MVT::i8; }
 
-    virtual const MCExpr *
+    const MCExpr *
     LowerCustomJumpTableEntry(const MachineJumpTableInfo *MJTI,
                               const MachineBasicBlock *MBB, unsigned uid,
-                              MCContext &Ctx) const;
+                              MCContext &Ctx) const override;
 
     /// getPICJumpTableRelocBase - Returns relocation base for the given PIC
     /// jumptable.
-    virtual SDValue getPICJumpTableRelocBase(SDValue Table,
-                                             SelectionDAG &DAG) const;
-    virtual const MCExpr *
+    SDValue getPICJumpTableRelocBase(SDValue Table,
+                                     SelectionDAG &DAG) const override;
+    const MCExpr *
     getPICJumpTableRelocBaseExpr(const MachineFunction *MF,
-                                 unsigned JTI, MCContext &Ctx) const;
+                                 unsigned JTI, MCContext &Ctx) const override;
 
     /// getByValTypeAlignment - Return the desired alignment for ByVal aggregate
     /// function arguments in the caller parameter area. For X86, aggregates
     /// that contain SSE vectors are placed at 16-byte boundaries while the rest
     /// are at 4-byte boundaries.
-    virtual unsigned getByValTypeAlignment(Type *Ty) const;
+    unsigned getByValTypeAlignment(Type *Ty) const override;
 
     /// getOptimalMemOpType - Returns the target specific optimal type for load
     /// and store operations as a result of memset, memcpy, and memmove
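For illustration, not part of the patch: the new PMULDQ node corresponds to the SSE4.1 pmuldq instruction, the signed counterpart of pmuludq. Both multiply the even (0th and 2nd) 32-bit lanes into full 64-bit products; they differ only in how the inputs are extended, which the negative operand below makes visible. A minimal sketch using the standard intrinsics (compile with -msse4.1 on x86-64):

#include <immintrin.h>
#include <cstdio>

int main() {
  __m128i a = _mm_set_epi32(0, -2, 0, 3); // lanes 2 and 0 hold -2 and 3
  __m128i b = _mm_set_epi32(0,  5, 0, 4); // lanes 2 and 0 hold  5 and 4
  __m128i s = _mm_mul_epi32(a, b); // PMULDQ:  signed products {3*4, -2*5}
  __m128i u = _mm_mul_epu32(a, b); // PMULUDQ: unsigned, -2 wraps to 2^32-2
  printf("signed:   %lld %lld\n", (long long)_mm_extract_epi64(s, 0),
         (long long)_mm_extract_epi64(s, 1)); // prints "12 -10"
  printf("unsigned: %llu %llu\n", (unsigned long long)_mm_extract_epi64(u, 0),
         (unsigned long long)_mm_extract_epi64(u, 1)); // prints "12 21474836470"
  return 0;
}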
@@ -560,10 +563,9 @@ namespace llvm {
     /// source is constant so it does not need to be loaded.
     /// It returns EVT::Other if the type should be determined using generic
     /// target-independent logic.
-    virtual EVT
-    getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
-                        bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
-                        MachineFunction &MF) const;
+    EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign, unsigned SrcAlign,
+                            bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
+                            MachineFunction &MF) const override;
 
     /// isSafeMemOpType - Returns true if it's safe to use load / store of the
     /// specified type to expand memcpy / memset inline. This is mostly true
@@ -571,89 +573,92 @@ namespace llvm {
     /// targets without SSE2 f64 load / store are done with fldl / fstpl which
     /// also does type conversion. Note the specified type doesn't have to be
     /// legal as the hook is used before type legalization.
-    virtual bool isSafeMemOpType(MVT VT) const;
+    bool isSafeMemOpType(MVT VT) const override;
 
     /// allowsUnalignedMemoryAccesses - Returns true if the target allows
     /// unaligned memory accesses of the specified type. Returns whether it
     /// is "fast" by reference in the second argument.
-    virtual bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
-                                               bool *Fast) const;
+    bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+                                       bool *Fast) const override;
 
     /// LowerOperation - Provide custom lowering hooks for some operations.
     ///
-    virtual SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
 
     /// ReplaceNodeResults - Replace the results of node with an illegal result
     /// type with new values built out of custom code.
     ///
-    virtual void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
-                                    SelectionDAG &DAG) const;
+    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
+                            SelectionDAG &DAG) const override;
 
-    virtual SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const;
+    SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;
 
     /// isTypeDesirableForOp - Return true if the target has native support for
     /// the specified value type and it is 'desirable' to use the type for the
     /// given node type. e.g. On x86 i16 is legal, but undesirable since i16
     /// instruction encodings are longer and some i16 instructions are slow.
-    virtual bool isTypeDesirableForOp(unsigned Opc, EVT VT) const;
+    bool isTypeDesirableForOp(unsigned Opc, EVT VT) const override;
 
     /// isTypeDesirable - Return true if the target has native support for the
     /// specified value type and it is 'desirable' to use the type. e.g. On x86
     /// i16 is legal, but undesirable since i16 instruction encodings are longer
     /// and some i16 instructions are slow.
-    virtual bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const;
+    bool IsDesirableToPromoteOp(SDValue Op, EVT &PVT) const override;
 
-    virtual MachineBasicBlock *
+    MachineBasicBlock *
       EmitInstrWithCustomInserter(MachineInstr *MI,
-                                  MachineBasicBlock *MBB) const;
+                                  MachineBasicBlock *MBB) const override;
 
     /// getTargetNodeName - This method returns the name of a target specific
     /// DAG node.
-    virtual const char *getTargetNodeName(unsigned Opcode) const;
+    const char *getTargetNodeName(unsigned Opcode) const override;
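For illustration, not part of the patch: getOptimalMemOpType (together with isSafeMemOpType and allowsUnalignedMemoryAccesses) steers how a fixed-size inline memcpy such as the one below is expanded. With SSE2 available the backend can pick 16-byte vector moves; otherwise it falls back to scalar i32/i64 loads and stores. The struct and function names here are hypothetical:

#include <cstring>
#include <cstdio>

struct Blob { char bytes[32]; };

void copyBlob(Blob *dst, const Blob *src) {
  std::memcpy(dst, src, sizeof(Blob)); // small fixed size: expanded inline
}

int main() {
  Blob a = {"hello"};
  Blob b = {};
  copyBlob(&b, &a);
  printf("%s\n", b.bytes); // prints "hello"
  return 0;
}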
 
     /// getSetCCResultType - Return the value type to use for ISD::SETCC.
-    virtual EVT getSetCCResultType(LLVMContext &Context, EVT VT) const;
+    EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
 
-    /// computeMaskedBitsForTargetNode - Determine which of the bits specified
+    /// computeKnownBitsForTargetNode - Determine which of the bits specified
     /// in Mask are known to be either zero or one and return them in the
     /// KnownZero/KnownOne bitsets.
-    virtual void computeMaskedBitsForTargetNode(const SDValue Op,
-                                                APInt &KnownZero,
-                                                APInt &KnownOne,
-                                                const SelectionDAG &DAG,
-                                                unsigned Depth = 0) const;
+    void computeKnownBitsForTargetNode(const SDValue Op,
+                                       APInt &KnownZero,
+                                       APInt &KnownOne,
+                                       const SelectionDAG &DAG,
+                                       unsigned Depth = 0) const override;
 
     // ComputeNumSignBitsForTargetNode - Determine the number of bits in the
     // operation that are sign bits.
-    virtual unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
-                                                     unsigned Depth) const;
+    unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
+                                             const SelectionDAG &DAG,
+                                             unsigned Depth) const override;
 
-    virtual bool
-    isGAPlusOffset(SDNode *N, const GlobalValue* &GA, int64_t &Offset) const;
+    bool isGAPlusOffset(SDNode *N, const GlobalValue* &GA,
+                        int64_t &Offset) const override;
 
     SDValue getReturnAddressFrameIndex(SelectionDAG &DAG) const;
 
-    virtual bool ExpandInlineAsm(CallInst *CI) const;
+    bool ExpandInlineAsm(CallInst *CI) const override;
 
-    ConstraintType getConstraintType(const std::string &Constraint) const;
+    ConstraintType
+      getConstraintType(const std::string &Constraint) const override;
 
     /// Examine constraint string and operand type and determine a weight value.
     /// The operand object must already have been set up with the operand type.
-    virtual ConstraintWeight getSingleConstraintMatchWeight(
-      AsmOperandInfo &info, const char *constraint) const;
+    ConstraintWeight
+      getSingleConstraintMatchWeight(AsmOperandInfo &info,
+                                     const char *constraint) const override;
 
-    virtual const char *LowerXConstraint(EVT ConstraintVT) const;
+    const char *LowerXConstraint(EVT ConstraintVT) const override;
 
     /// LowerAsmOperandForConstraint - Lower the specified operand into the Ops
     /// vector. If it is invalid, don't add anything to Ops. If hasMemory is
     /// true it means one of the asm constraint of the inline asm instruction
     /// being processed is 'm'.
-    virtual void LowerAsmOperandForConstraint(SDValue Op,
-                                              std::string &Constraint,
-                                              std::vector<SDValue> &Ops,
-                                              SelectionDAG &DAG) const;
+    void LowerAsmOperandForConstraint(SDValue Op,
+                                      std::string &Constraint,
+                                      std::vector<SDValue> &Ops,
+                                      SelectionDAG &DAG) const override;
 
     /// getRegForInlineAsmConstraint - Given a physical register constraint
     /// (e.g. {edx}), return the register number and the register class for the
@@ -661,34 +666,40 @@ namespace llvm {
     /// error, this returns a register number of 0.
     std::pair<unsigned, const TargetRegisterClass*>
      getRegForInlineAsmConstraint(const std::string &Constraint,
-                                   MVT VT) const;
+                                   MVT VT) const override;
 
     /// isLegalAddressingMode - Return true if the addressing mode represented
     /// by AM is legal for this target, for a load/store of the specified type.
-    virtual bool isLegalAddressingMode(const AddrMode &AM, Type *Ty)const;
+    bool isLegalAddressingMode(const AddrMode &AM, Type *Ty) const override;
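For illustration, not part of the patch: the constraint hooks above (getConstraintType, getSingleConstraintMatchWeight, getRegForInlineAsmConstraint) are what resolve register constraints in inline assembly. In this GCC/Clang extended-asm sketch, "=a" and "=d" pin the outputs to EAX and EDX, exactly the kind of physical-register request those hooks must answer (x86 only):

#include <cstdint>
#include <cstdio>

int main() {
  uint32_t lo, hi;
  // rdtsc writes EDX:EAX; the "a"/"d" constraints name those registers.
  asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
  printf("tsc = %llu\n", ((unsigned long long)hi << 32) | lo);
  return 0;
}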
 
     /// isLegalICmpImmediate - Return true if the specified immediate is legal
     /// icmp immediate, that is the target has icmp instructions which can
     /// compare a register against the immediate without having to materialize
     /// the immediate into a register.
-    virtual bool isLegalICmpImmediate(int64_t Imm) const;
+    bool isLegalICmpImmediate(int64_t Imm) const override;
 
     /// isLegalAddImmediate - Return true if the specified immediate is legal
     /// add immediate, that is the target has add instructions which can
     /// add a register and the immediate without having to materialize
     /// the immediate into a register.
-    virtual bool isLegalAddImmediate(int64_t Imm) const;
+    bool isLegalAddImmediate(int64_t Imm) const override;
+
+    /// \brief Return the cost of the scaling factor used in the addressing
+    /// mode represented by AM for this target, for a load/store
+    /// of the specified type.
+    /// If the AM is supported, the return value must be >= 0.
+    /// If the AM is not supported, it returns a negative value.
+    int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;
 
-    virtual bool isVectorShiftByScalarCheap(Type *Ty) const;
+    bool isVectorShiftByScalarCheap(Type *Ty) const override;
 
     /// isTruncateFree - Return true if it's free to truncate a value of
     /// type Ty1 to type Ty2. e.g. On x86 it's free to truncate an i32 value in
     /// register EAX to i16 by referencing its sub-register AX.
-    virtual bool isTruncateFree(Type *Ty1, Type *Ty2) const;
-    virtual bool isTruncateFree(EVT VT1, EVT VT2) const;
+    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
+    bool isTruncateFree(EVT VT1, EVT VT2) const override;
 
-    virtual bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const;
+    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;
 
     /// isZExtFree - Return true if any actual instruction that defines a
     /// value of type Ty1 implicit zero-extends the value to Ty2 in the result
@@ -698,44 +709,44 @@ namespace llvm {
     /// does not necessarily apply to truncate instructions. e.g. on x86-64,
     /// all instructions that define 32-bit values implicit zero-extend the
     /// result out to 64 bits.
-    virtual bool isZExtFree(Type *Ty1, Type *Ty2) const;
-    virtual bool isZExtFree(EVT VT1, EVT VT2) const;
-    virtual bool isZExtFree(SDValue Val, EVT VT2) const;
+    bool isZExtFree(Type *Ty1, Type *Ty2) const override;
+    bool isZExtFree(EVT VT1, EVT VT2) const override;
+    bool isZExtFree(SDValue Val, EVT VT2) const override;
 
     /// isFMAFasterThanFMulAndFAdd - Return true if an FMA operation is faster
     /// than a pair of fmul and fadd instructions. fmuladd intrinsics will be
     /// expanded to FMAs when this method returns true, otherwise fmuladd is
     /// expanded to fmul + fadd.
-    virtual bool isFMAFasterThanFMulAndFAdd(EVT VT) const;
+    bool isFMAFasterThanFMulAndFAdd(EVT VT) const override;
 
     /// isNarrowingProfitable - Return true if it's profitable to narrow
     /// operations of type VT1 to VT2. e.g. on x86, it's profitable to narrow
     /// from i32 to i8 but not from i32 to i16.
-    virtual bool isNarrowingProfitable(EVT VT1, EVT VT2) const;
+    bool isNarrowingProfitable(EVT VT1, EVT VT2) const override;
 
     /// isFPImmLegal - Returns true if the target can instruction select the
     /// specified FP immediate natively. If false, the legalizer will
     /// materialize the FP immediate as a load from a constant pool.
-    virtual bool isFPImmLegal(const APFloat &Imm, EVT VT) const;
+    bool isFPImmLegal(const APFloat &Imm, EVT VT) const override;
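For illustration, not part of the patch: what isTruncateFree and isZExtFree describe. On x86-64, writing a 32-bit register implicitly zeroes the upper 32 bits, so widen() should need no extra instruction, and narrow() simply refers to the 32-bit sub-register (EAX inside RAX):

#include <cstdint>
#include <cstdio>

uint64_t widen(uint32_t x) { return x; } // free zext: a 32-bit mov already zero-extends
uint32_t narrow(uint64_t x) { return static_cast<uint32_t>(x); } // free trunc: use the sub-register

int main() {
  printf("%llu %u\n", (unsigned long long)widen(7u), narrow(0x100000009ULL));
  return 0; // prints "7 9"
}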
 
     /// isShuffleMaskLegal - Targets can use this to indicate that they only
     /// support *some* VECTOR_SHUFFLE operations, those with specific masks.
     /// By default, if a target supports the VECTOR_SHUFFLE node, all mask
     /// values are assumed to be legal.
-    virtual bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
-                                    EVT VT) const;
+    bool isShuffleMaskLegal(const SmallVectorImpl<int> &Mask,
+                            EVT VT) const override;
 
     /// isVectorClearMaskLegal - Similar to isShuffleMaskLegal. This is
     /// used by targets to indicate if there is a suitable
     /// VECTOR_SHUFFLE that can be used to replace a VAND with a constant
     /// pool entry.
-    virtual bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
-                                        EVT VT) const;
+    bool isVectorClearMaskLegal(const SmallVectorImpl<int> &Mask,
+                                EVT VT) const override;
 
     /// ShouldShrinkFPConstant - If true, then instruction selection should
     /// seek to shrink the FP constant of the specified type to a smaller type
     /// in order to save space and / or reduce runtime.
-    virtual bool ShouldShrinkFPConstant(EVT VT) const {
+    bool ShouldShrinkFPConstant(EVT VT) const override {
       // Don't shrink FP constpool if SSE2 is available since cvtss2sd is more
       // expensive than a straight movsd. On the other hand, it's important to
       // shrink long double fp constant since fldt is very slow.
@@ -756,7 +767,7 @@ namespace llvm {
     /// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
     /// for fptoui.
     bool isTargetFTOL() const {
-      return Subtarget->isTargetWindows() && !Subtarget->is64Bit();
+      return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
     }
 
     /// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
@@ -767,31 +778,39 @@ namespace llvm {
 
     /// \brief Returns true if it is beneficial to convert a load of a constant
     /// to just the constant itself.
-    virtual bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
-                                                   Type *Ty) const;
+    bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
+                                           Type *Ty) const override;
+
+    /// Intel processors have a unified instruction and data cache
+    const char * getClearCacheBuiltinName() const override {
+      return nullptr; // nothing to do, move along.
+    }
+
+    unsigned getRegisterByName(const char* RegName, EVT VT) const override;
 
     /// createFastISel - This method returns a target specific FastISel object,
     /// or null if the target does not support "fast" ISel.
-    virtual FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
-                                     const TargetLibraryInfo *libInfo) const;
+    FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
+                             const TargetLibraryInfo *libInfo) const override;
 
     /// getStackCookieLocation - Return true if the target stores stack
     /// protector cookies at a fixed offset in some non-standard address
     /// space, and populates the address space and offset as
     /// appropriate.
-    virtual bool getStackCookieLocation(unsigned &AddressSpace, unsigned &Offset) const;
+    bool getStackCookieLocation(unsigned &AddressSpace,
+                                unsigned &Offset) const override;
 
     SDValue BuildFILD(SDValue Op, EVT SrcVT, SDValue Chain, SDValue StackSlot,
                       SelectionDAG &DAG) const;
 
-    virtual bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const LLVM_OVERRIDE;
+    bool isNoopAddrSpaceCast(unsigned SrcAS, unsigned DestAS) const override;
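For illustration, not part of the patch: the fptoui case that isTargetFTOL and isIntegerTypeFTOL govern. On 32-bit MSVC targets the conversion below is lowered to a call to the _ftol2 runtime helper; on x86-64 it can be done inline (e.g. with cvttsd2si):

#include <cstdio>

int main() {
  double d = 3000000000.0;  // fits in unsigned, not in a signed 32-bit int
  unsigned u = (unsigned)d; // fptoui; may become a _ftol2 call on x86-32 MSVC
  printf("%u\n", u);        // prints 3000000000
  return 0;
}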
 
     /// \brief Reset the operation actions based on target options.
-    virtual void resetOperationActions();
+    void resetOperationActions() override;
 
   protected:
     std::pair<const TargetRegisterClass*, uint8_t>
-      findRepresentativeClass(MVT VT) const;
+      findRepresentativeClass(MVT VT) const override;
 
   private:
     /// Subtarget - Keep a pointer to the X86Subtarget around so that we can
@@ -865,7 +884,11 @@ namespace llvm {
     SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
+    SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const;
+    SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const;
+
     SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
@@ -901,38 +924,36 @@ namespace llvm {
     SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
     SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
+    SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
 
-    virtual SDValue
+    SDValue
       LowerFormalArguments(SDValue Chain,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            SDLoc dl, SelectionDAG &DAG,
-                           SmallVectorImpl<SDValue> &InVals) const;
-    virtual SDValue
-      LowerCall(CallLoweringInfo &CLI,
-                SmallVectorImpl<SDValue> &InVals) const;
+                           SmallVectorImpl<SDValue> &InVals) const override;
+    SDValue LowerCall(CallLoweringInfo &CLI,
+                      SmallVectorImpl<SDValue> &InVals) const override;
 
-    virtual SDValue
-      LowerReturn(SDValue Chain,
-                  CallingConv::ID CallConv, bool isVarArg,
-                  const SmallVectorImpl<ISD::OutputArg> &Outs,
-                  const SmallVectorImpl<SDValue> &OutVals,
-                  SDLoc dl, SelectionDAG &DAG) const;
+    SDValue LowerReturn(SDValue Chain,
+                        CallingConv::ID CallConv, bool isVarArg,
+                        const SmallVectorImpl<ISD::OutputArg> &Outs,
+                        const SmallVectorImpl<SDValue> &OutVals,
+                        SDLoc dl, SelectionDAG &DAG) const override;
 
-    virtual bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const;
+    bool isUsedByReturnOnly(SDNode *N, SDValue &Chain) const override;
 
-    virtual bool mayBeEmittedAsTailCall(CallInst *CI) const;
+    bool mayBeEmittedAsTailCall(CallInst *CI) const override;
 
-    virtual MVT
-      getTypeForExtArgOrReturn(MVT VT, ISD::NodeType ExtendKind) const;
+    MVT getTypeForExtArgOrReturn(MVT VT,
+                                 ISD::NodeType ExtendKind) const override;
 
-    virtual bool
-      CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
-                     bool isVarArg,
-                     const SmallVectorImpl<ISD::OutputArg> &Outs,
-                     LLVMContext &Context) const;
+    bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
+                        bool isVarArg,
+                        const SmallVectorImpl<ISD::OutputArg> &Outs,
+                        LLVMContext &Context) const override;
 
-    virtual const uint16_t *getScratchRegisters(CallingConv::ID CC) const;
+    const MCPhysReg *getScratchRegisters(CallingConv::ID CC) const override;
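For illustration, not part of the patch: the atomic-load-arith utilities described just below back C++ atomic read-modify-write operations. On x86, fetch_add maps to LOCK XADD, while or/and/xor/nand/min/max generally expand to a CMPXCHG loop when the old value is needed:

#include <atomic>
#include <cstdio>

int main() {
  std::atomic<int> v{6};
  int old1 = v.fetch_or(9);   // atomic or:  6 | 9   -> 15
  int old2 = v.fetch_and(12); // atomic and: 15 & 12 -> 12
  printf("%d %d %d\n", old1, old2, v.load()); // prints "6 15 12"
  return 0;
}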
 
     /// Utility function to emit atomic-load-arith operations (and, or, xor,
     /// nand, max, min, umax, umin). It takes the corresponding instruction to
@@ -983,11 +1004,12 @@ namespace llvm {
 
     /// Emit nodes that will be selected as "test Op0,Op0", or something
     /// equivalent, for use with the given x86 condition code.
-    SDValue EmitTest(SDValue Op0, unsigned X86CC, SelectionDAG &DAG) const;
+    SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
+                     SelectionDAG &DAG) const;
 
     /// Emit nodes that will be selected as "cmp Op0,Op1", or something
     /// equivalent, for use with the given x86 condition code.
-    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC,
+    SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl,
                     SelectionDAG &DAG) const;
 
     /// Convert a comparison if required by the subtarget.
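For illustration, not part of the patch: the two shapes EmitTest and EmitCmp produce. A comparison against zero needs no second operand, so it is selected as "test reg,reg" (same EFLAGS result, shorter encoding than "cmp reg,0"), while a comparison of two values becomes a "cmp". The register names in the comments assume the SysV x86-64 ABI:

#include <cstdio>

bool isZero(int x) { return x == 0; }         // test edi,edi ; sete al
bool lessThan(int a, int b) { return a < b; } // cmp  edi,esi ; setl al

int main() {
  printf("%d %d\n", isZero(0), lessThan(1, 2)); // prints "1 1"
  return 0;
}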