//
//===----------------------------------------------------------------------===//
-#ifndef X86ISELLOWERING_H
-#define X86ISELLOWERING_H
+#ifndef LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
+#define LLVM_LIB_TARGET_X86_X86ISELLOWERING_H
-#include "X86Subtarget.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetOptions.h"
namespace llvm {
+ class X86Subtarget;
class X86TargetMachine;
namespace X86ISD {
/// readcyclecounter
RDTSC_DAG,
+ /// X86 Read Time-Stamp Counter and Processor ID.
+ RDTSCP_DAG,
+
+ /// X86 Read Performance Monitoring Counters.
+ RDPMC_DAG,
+
/// X86 compare and logical compare instructions.
CMP, COMI, UCOMI,
ADD, SUB, ADC, SBB, SMUL,
INC, DEC, OR, XOR, AND,
- BZHI, // BZHI - Zero high bits
BEXTR, // BEXTR - Bit field extract
UMUL, // LOW, HI, FLAGS = umul LHS, RHS
KORTEST,
// Several flavors of instructions with vector shuffle behaviors.
+ PACKSS,
+ PACKUS,
+ // Intra-lane alignr
PALIGNR,
+ // AVX512 inter-lane alignr
+ VALIGN,
PSHUFD,
PSHUFHW,
PSHUFLW,
// PMULUDQ - Vector multiply packed unsigned doubleword integers
PMULUDQ,
+ // PMULDQ - Vector multiply packed signed doubleword integers
+ PMULDQ,
// FMA nodes
FMADD,
// XTEST - Test if in transactional execution.
XTEST,
- // ATOMADD64_DAG, ATOMSUB64_DAG, ATOMOR64_DAG, ATOMAND64_DAG,
- // ATOMXOR64_DAG, ATOMNAND64_DAG, ATOMSWAP64_DAG -
- // Atomic 64-bit binary operations.
- ATOMADD64_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
- ATOMSUB64_DAG,
- ATOMOR64_DAG,
- ATOMXOR64_DAG,
- ATOMAND64_DAG,
- ATOMNAND64_DAG,
- ATOMMAX64_DAG,
- ATOMMIN64_DAG,
- ATOMUMAX64_DAG,
- ATOMUMIN64_DAG,
- ATOMSWAP64_DAG,
-
// LCMPXCHG_DAG, LCMPXCHG8_DAG, LCMPXCHG16_DAG - Compare and swap.
- LCMPXCHG_DAG,
+ LCMPXCHG_DAG = ISD::FIRST_TARGET_MEMORY_OPCODE,
LCMPXCHG8_DAG,
LCMPXCHG16_DAG,
/// own arguments. Callee pop is necessary to support tail calls.
bool isCalleePop(CallingConv::ID CallingConv,
bool is64Bit, bool IsVarArg, bool TailCallOpt);
+
+ /// AVX512 static rounding constants. These need to match the values in
+ /// avx512fintrin.h.
+ enum STATIC_ROUNDING {
+ TO_NEAREST_INT = 0,
+ TO_NEG_INF = 1,
+ TO_POS_INF = 2,
+ TO_ZERO = 3,
+ CUR_DIRECTION = 4
+ };
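These enumerators encode the same immediates the intrinsics headers spell with the _MM_FROUND_* macros, so a compile-time cross-check is a cheap way to catch drift. A minimal sketch (placement and wording illustrative only, not part of the patch):

  static_assert(TO_NEAREST_INT == 0 && TO_NEG_INF == 1 && TO_POS_INF == 2 &&
                TO_ZERO == 3 && CUR_DIRECTION == 4,
                "STATIC_ROUNDING must stay in sync with avx512fintrin.h");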
}
//===--------------------------------------------------------------------===//
/// legal as the hook is used before type legalization.
bool isSafeMemOpType(MVT VT) const override;
- /// allowsUnalignedMemoryAccesses - Returns true if the target allows
+ /// allowsMisalignedMemoryAccesses - Returns true if the target allows
/// misaligned memory accesses of the specified type. Returns whether it
/// is "fast" by reference in the last argument.
- bool allowsUnalignedMemoryAccesses(EVT VT, unsigned AS,
+ bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AS, unsigned Align,
bool *Fast) const override;
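Callers are expected to combine both results: the return value says whether a misaligned access of VT is legal at all, and the flag says whether it is also cheap. A hypothetical caller, sketched for illustration (helper name is invented):

  // Use one wide misaligned access only when the target reports it as both
  // legal and fast; otherwise the access should be split up.
  static bool useWideMisalignedAccess(const TargetLowering &TLI, EVT VT,
                                      unsigned AddrSpace, unsigned Align) {
    bool Fast = false;
    return TLI.allowsMisalignedMemoryAccesses(VT, AddrSpace, Align, &Fast) &&
           Fast;
  }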
/// LowerOperation - Provide custom lowering hooks for some operations.
/// getSetCCResultType - Return the value type to use for ISD::SETCC.
EVT getSetCCResultType(LLVMContext &Context, EVT VT) const override;
- /// computeMaskedBitsForTargetNode - Determine which of the bits specified
+ /// computeKnownBitsForTargetNode - Determine which bits of Op are known
/// to be either zero or one and return them in the
/// KnownZero/KnownOne bitsets.
- void computeMaskedBitsForTargetNode(const SDValue Op,
- APInt &KnownZero,
- APInt &KnownOne,
- const SelectionDAG &DAG,
- unsigned Depth = 0) const override;
+ void computeKnownBitsForTargetNode(const SDValue Op,
+ APInt &KnownZero,
+ APInt &KnownOne,
+ const SelectionDAG &DAG,
+ unsigned Depth = 0) const override;
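Generic combines do not call this hook directly; they go through SelectionDAG::computeKnownBits, which recurses into the target hook when it reaches an X86ISD opcode. A sketch of a typical query (the helper name is hypothetical):

  // Returns true if the sign bit of Op is known to be zero.
  static bool signBitKnownZero(SDValue Op, const SelectionDAG &DAG) {
    APInt KnownZero, KnownOne;
    DAG.computeKnownBits(Op, KnownZero, KnownOne);
    return KnownZero.isNegative(); // MSB set in KnownZero => sign bit is 0
  }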
// ComputeNumSignBitsForTargetNode - Determine the number of bits in the
// operation that are sign bits.
/// the immediate into a register.
bool isLegalAddImmediate(int64_t Imm) const override;
+ /// \brief Return the cost of the scaling factor used in the addressing
+ /// mode represented by AM for this target, for a load/store
+ /// of the specified type.
+ /// If the AM is supported, the return value must be >= 0.
+ /// If the AM is not supported, it returns a negative value.
+ int getScalingFactorCost(const AddrMode &AM, Type *Ty) const override;
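The sign convention is the whole contract: a negative return means the addressing mode is unsupported, and non-negative values are only meaningful relative to each other. A hypothetical comparison between two candidate modes, for illustration:

  // Prefer A over B when A is supported and strictly cheaper, or when B is
  // not supported at all.
  static bool preferAddrMode(const TargetLowering &TLI,
                             const TargetLowering::AddrMode &A,
                             const TargetLowering::AddrMode &B,
                             Type *AccessTy) {
    int CostA = TLI.getScalingFactorCost(A, AccessTy);
    int CostB = TLI.getScalingFactorCost(B, AccessTy);
    return CostA >= 0 && (CostB < 0 || CostA < CostB);
  }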
bool isVectorShiftByScalarCheap(Type *Ty) const override;
/// isTargetFTOL - Return true if the target uses the MSVC _ftol2 routine
/// for fptoui.
- bool isTargetFTOL() const {
- return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
- }
+ bool isTargetFTOL() const;
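With X86Subtarget only forward-declared in this header, the old inline body can no longer live here; the definition moves out of line. A sketch of the corresponding definition in X86ISelLowering.cpp, reusing the logic of the removed inline version:

  bool X86TargetLowering::isTargetFTOL() const {
    return Subtarget->isTargetKnownWindowsMSVC() && !Subtarget->is64Bit();
  }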
/// isIntegerTypeFTOL - Return true if the MSVC _ftol2 routine should be
/// used for fptoui to the given type.
Type *Ty) const override;
/// Intel processors have a unified instruction and data cache, so there is
/// no cache-flush builtin to call.
- const char * getClearCacheBuiltinName() const {
- return 0; // nothing to do, move along.
+ const char * getClearCacheBuiltinName() const override {
+ return nullptr; // nothing to do, move along.
}
+ unsigned getRegisterByName(const char* RegName, EVT VT) const override;
+
/// createFastISel - This method returns a target specific FastISel object,
/// or null if the target does not support "fast" ISel.
FastISel *createFastISel(FunctionLoweringInfo &funcInfo,
/// \brief Reset the operation actions based on target options.
void resetOperationActions() override;
+ bool useLoadStackGuardNode() const override;
+ /// \brief Customize the preferred legalization strategy for certain types.
+ LegalizeTypeAction getPreferredVectorAction(EVT VT) const override;
+
protected:
std::pair<const TargetRegisterClass*, uint8_t>
findRepresentativeClass(MVT VT) const override;
SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerBUILD_VECTORvXi1(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
SDValue ExtractBitFromMaskVector(SDValue Op, SelectionDAG &DAG) const;
SDValue InsertBitToMaskVector(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerINIT_TRAMPOLINE(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerFLT_ROUNDS_(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerSIGN_EXTEND_INREG(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerWin64_i128OP(SDValue Op, SelectionDAG &DAG) const;
SDValue
LowerFormalArguments(SDValue Chain,
bool mayBeEmittedAsTailCall(CallInst *CI) const override;
- MVT getTypeForExtArgOrReturn(MVT VT,
+ EVT getTypeForExtArgOrReturn(LLVMContext &Context, EVT VT,
ISD::NodeType ExtendKind) const override;
bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
/// Emit nodes that will be selected as "test Op0,Op0", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitTest(SDLoc dl, SDValue Op0, unsigned X86CC,
+ SDValue EmitTest(SDValue Op0, unsigned X86CC, SDLoc dl,
SelectionDAG &DAG) const;
/// Emit nodes that will be selected as "cmp Op0,Op1", or something
/// equivalent, for use with the given x86 condition code.
- SDValue EmitCmp(SDLoc dl, SDValue Op0, SDValue Op1, unsigned X86CC,
+ SDValue EmitCmp(SDValue Op0, SDValue Op1, unsigned X86CC, SDLoc dl,
SelectionDAG &DAG) const;
/// Convert a comparison if required by the subtarget.