#include "llvm/Intrinsics.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/VectorExtras.h"
-#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
// Forward declarations.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG);
-X86TargetLowering::X86TargetLowering(TargetMachine &TM)
+X86TargetLowering::X86TargetLowering(X86TargetMachine &TM)
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
X86ScalarSSEf64 = Subtarget->hasSSE2();
// First set operation actions for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
for (unsigned VT = (unsigned)MVT::FIRST_VECTOR_VALUETYPE;
VT <= (unsigned)MVT::LAST_VECTOR_VALUETYPE; ++VT) {
- setOperationAction(ISD::ADD , (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SUB , (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FADD, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FNEG, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FSUB, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::MUL , (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FMUL, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SDIV, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UDIV, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FDIV, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FABS, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FSIN, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FCOS, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FPOWI, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FSQRT, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FCOPYSIGN, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SMUL_LOHI, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UMUL_LOHI, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SDIVREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::UDIVREM, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::FPOW, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::CTPOP, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::CTTZ, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::CTLZ, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SHL, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SRA, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::SRL, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::ROTL, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::ROTR, (MVT::ValueType)VT, Expand);
- setOperationAction(ISD::BSWAP, (MVT::ValueType)VT, Expand);
+ setOperationAction(ISD::ADD , (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SUB , (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FADD, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FNEG, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FSUB, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::MUL , (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FMUL, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SDIV, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::UDIV, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FDIV, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SREM, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::UREM, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::INSERT_VECTOR_ELT, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FABS, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FSIN, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FCOS, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FREM, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FPOWI, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FSQRT, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FCOPYSIGN, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::UMUL_LOHI, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SDIVREM, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::UDIVREM, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::FPOW, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::CTPOP, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::CTTZ, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::CTLZ, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SHL, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SRA, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::SRL, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::ROTL, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::ROTR, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::BSWAP, (MVT::SimpleValueType)VT, Expand);
+ setOperationAction(ISD::VSETCC, (MVT::SimpleValueType)VT, Expand);
}
if (Subtarget->hasSSE1()) {
setOperationAction(ISD::VECTOR_SHUFFLE, MVT::v4f32, Custom);
setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::v4f32, Custom);
setOperationAction(ISD::SELECT, MVT::v4f32, Custom);
+ setOperationAction(ISD::VSETCC, MVT::v4f32, Legal);
}
if (Subtarget->hasSSE2()) {
setOperationAction(ISD::FSQRT, MVT::v2f64, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Custom);
+ setOperationAction(ISD::VSETCC, MVT::v2f64, Legal);
+ setOperationAction(ISD::VSETCC, MVT::v16i8, Legal);
+ setOperationAction(ISD::VSETCC, MVT::v8i16, Legal);
+ setOperationAction(ISD::VSETCC, MVT::v4i32, Legal);
+ setOperationAction(ISD::VSETCC, MVT::v2i64, Legal);
+
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v16i8, Custom);
setOperationAction(ISD::SCALAR_TO_VECTOR, MVT::v8i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v8i16, Custom);
setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::v4f32, Custom);
// Custom lower build_vector, vector_shuffle, and extract_vector_elt.
- for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
+ for (unsigned i = (unsigned)MVT::v16i8; i != (unsigned)MVT::v2i64; ++i) {
+ MVT VT = (MVT::SimpleValueType)i;
// Do not attempt to custom lower non-power-of-2 vectors
- if (!isPowerOf2_32(MVT::getVectorNumElements(VT)))
+ if (!isPowerOf2_32(VT.getVectorNumElements()))
continue;
- setOperationAction(ISD::BUILD_VECTOR, (MVT::ValueType)VT, Custom);
- setOperationAction(ISD::VECTOR_SHUFFLE, (MVT::ValueType)VT, Custom);
- setOperationAction(ISD::EXTRACT_VECTOR_ELT, (MVT::ValueType)VT, Custom);
+ setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
+ setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
+ setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
}
setOperationAction(ISD::BUILD_VECTOR, MVT::v2f64, Custom);
setOperationAction(ISD::BUILD_VECTOR, MVT::v2i64, Custom);
// Promote v16i8, v8i16, v4i32 load, select, and, or, xor to v2i64.
for (unsigned VT = (unsigned)MVT::v16i8; VT != (unsigned)MVT::v2i64; VT++) {
- setOperationAction(ISD::AND, (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::AND, (MVT::ValueType)VT, MVT::v2i64);
- setOperationAction(ISD::OR, (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::OR, (MVT::ValueType)VT, MVT::v2i64);
- setOperationAction(ISD::XOR, (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::XOR, (MVT::ValueType)VT, MVT::v2i64);
- setOperationAction(ISD::LOAD, (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::LOAD, (MVT::ValueType)VT, MVT::v2i64);
- setOperationAction(ISD::SELECT, (MVT::ValueType)VT, Promote);
- AddPromotedToType (ISD::SELECT, (MVT::ValueType)VT, MVT::v2i64);
+ setOperationAction(ISD::AND, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::AND, (MVT::SimpleValueType)VT, MVT::v2i64);
+ setOperationAction(ISD::OR, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::OR, (MVT::SimpleValueType)VT, MVT::v2i64);
+ setOperationAction(ISD::XOR, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::XOR, (MVT::SimpleValueType)VT, MVT::v2i64);
+ setOperationAction(ISD::LOAD, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::LOAD, (MVT::SimpleValueType)VT, MVT::v2i64);
+ setOperationAction(ISD::SELECT, (MVT::SimpleValueType)VT, Promote);
+ AddPromotedToType (ISD::SELECT, (MVT::SimpleValueType)VT, MVT::v2i64);
}
setTruncStoreAction(MVT::f64, MVT::f32, Expand);
setOperationAction(ISD::LOAD, MVT::v2i64, Legal);
setOperationAction(ISD::SELECT, MVT::v2f64, Custom);
setOperationAction(ISD::SELECT, MVT::v2i64, Custom);
+
}
if (Subtarget->hasSSE41()) {
// FIXME: Do we need to handle scalar-to-vector here?
setOperationAction(ISD::MUL, MVT::v4i32, Legal);
+ setOperationAction(ISD::MUL, MVT::v2i64, Legal);
// i8 and i16 vectors are custom, because the source register and source
// memory operand types are not the same width. f32 vectors are custom
// since the immediate controlling the insert encodes additional information.
// FIXME: These should be based on subtarget info. Plus, the values should
// be smaller when we are optimizing for size.
maxStoresPerMemset = 16; // For %llvm.memset -> sequence of stores
maxStoresPerMemcpy = 16; // For %llvm.memcpy -> sequence of stores
- maxStoresPerMemmove = 16; // For %llvm.memmove -> sequence of stores
+ maxStoresPerMemmove = 3; // For %llvm.memmove -> sequence of stores
allowUnalignedMemoryAccesses = true; // x86 supports it!
setPrefLoopAlignment(16);
}
-MVT::ValueType
-X86TargetLowering::getSetCCResultType(const SDOperand &) const {
+MVT X86TargetLowering::getSetCCResultType(const SDOperand &) const {
return MVT::i8;
}
return Align;
}
+/// getOptimalMemOpType - Returns the target specific optimal type for load
+/// and store operations as a result of memset, memcpy, and memmove
+/// lowering. It returns MVT::iAny if SelectionDAG should be responsible for
+/// determining it.
+MVT
+X86TargetLowering::getOptimalMemOpType(uint64_t Size, unsigned Align,
+ bool isSrcConst, bool isSrcStr) const {
+ if ((isSrcConst || isSrcStr) && Subtarget->hasSSE2() && Size >= 16)
+ return MVT::v4i32;
+ if ((isSrcConst || isSrcStr) && Subtarget->hasSSE1() && Size >= 16)
+ return MVT::v4f32;
+ if (Subtarget->is64Bit() && Size >= 8)
+ return MVT::i64;
+ return MVT::i32;
+}
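+// For example, under these rules a 16-byte-or-larger memcpy from a constant
+// source is lowered with 16-byte vector ops (v4i32 with SSE2, else v4f32 with
+// SSE1), an 8-byte-or-larger operation on x86-64 uses i64, and everything
+// else falls back to i32.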
+
+
/// getPICJumpTableRelocBase - Returns relocation base for the given PIC
/// jumptable.
SDOperand X86TargetLowering::getPICJumpTableRelocBase(SDOperand Table,
// Copy all of the result registers out of their specified physreg.
for (unsigned i = 0; i != RVLocs.size(); ++i) {
- MVT::ValueType CopyVT = RVLocs[i].getValVT();
+ MVT CopyVT = RVLocs[i].getValVT();
// If this is a call to a function that returns an fp value on the floating
// point stack, but where we prefer to use the value in xmm registers, copy
// it out as F80 and use a truncate to move it from fp stack reg to xmm reg.
// FIXME: For now, all byval parameter objects are marked mutable. This can be
// changed with more analysis.
// In case of tail call optimization mark all arguments mutable. Since they
// could be overwritten by lowering of arguments in case of a tail call.
- int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
+ int FI = MFI->CreateFixedObject(VA.getValVT().getSizeInBits()/8,
VA.getLocMemOffset(), isImmutable);
SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
if (Flags.isByVal())
LastVal = VA.getValNo();
if (VA.isRegLoc()) {
- MVT::ValueType RegVT = VA.getLocVT();
+ MVT RegVT = VA.getLocVT();
TargetRegisterClass *RC;
if (RegVT == MVT::i32)
RC = X86::GR32RegisterClass;
RC = X86::FR32RegisterClass;
else if (RegVT == MVT::f64)
RC = X86::FR64RegisterClass;
- else if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 128)
+ else if (RegVT.isVector() && RegVT.getSizeInBits() == 128)
RC = X86::VR128RegisterClass;
- else if (MVT::isVector(RegVT)) {
- assert(MVT::getSizeInBits(RegVT) == 64);
+ else if (RegVT.isVector()) {
+ assert(RegVT.getSizeInBits() == 64);
if (!Is64Bit)
RC = X86::VR64RegisterClass; // MMX values are passed in MMXs.
else {
// Handle MMX values passed in GPRs.
if (Is64Bit && RegVT != VA.getLocVT()) {
- if (MVT::getSizeInBits(RegVT) == 64 && RC == X86::GR64RegisterClass)
+ if (RegVT.getSizeInBits() == 64 && RC == X86::GR64RegisterClass)
ArgValue = DAG.getNode(ISD::BIT_CONVERT, VA.getLocVT(), ArgValue);
else if (RC == X86::VR128RegisterClass) {
ArgValue = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i64, ArgValue,
if (!IsTailCall || FPDiff==0) return Chain;
// Adjust the Return address stack slot.
- MVT::ValueType VT = getPointerTy();
+ MVT VT = getPointerTy();
OutRetAddr = getReturnAddressFrameIndex(DAG);
// Load the "old" Return address.
OutRetAddr = DAG.getLoad(VT, Chain,OutRetAddr, NULL, 0);
int SlotSize = Is64Bit ? 8 : 4;
int NewReturnAddrFI =
MF.getFrameInfo()->CreateFixedObject(SlotSize, FPDiff-SlotSize);
- MVT::ValueType VT = Is64Bit ? MVT::i64 : MVT::i32;
+ MVT VT = Is64Bit ? MVT::i64 : MVT::i32;
SDOperand NewRetAddrFrIdx = DAG.getFrameIndex(NewReturnAddrFI, VT);
Chain = DAG.getStore(Chain, RetAddrFrIdx, NewRetAddrFrIdx,
PseudoSourceValue::getFixedStack(), NewReturnAddrFI);
if (VA.isRegLoc()) {
if (Is64Bit) {
- MVT::ValueType RegVT = VA.getLocVT();
- if (MVT::isVector(RegVT) && MVT::getSizeInBits(RegVT) == 64)
+ MVT RegVT = VA.getLocVT();
+ if (RegVT.isVector() && RegVT.getSizeInBits() == 64)
switch (VA.getLocReg()) {
default:
break;
cast<ARG_FLAGSSDNode>(FlagsOp)->getArgFlags();
// Create frame index.
int32_t Offset = VA.getLocMemOffset()+FPDiff;
- uint32_t OpSize = (MVT::getSizeInBits(VA.getLocVT())+7)/8;
+ uint32_t OpSize = (VA.getLocVT().getSizeInBits()+7)/8;
FI = MF.getFrameInfo()->CreateFixedObject(OpSize, Offset);
FIN = DAG.getFrameIndex(FI, getPointerTy());
static SDOperand CommuteVectorShuffle(SDOperand Op, SDOperand &V1,
SDOperand &V2, SDOperand &Mask,
SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType MaskVT = Mask.getValueType();
- MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
+ MVT VT = Op.getValueType();
+ MVT MaskVT = Mask.getValueType();
+ MVT EltVT = MaskVT.getVectorElementType();
unsigned NumElems = Mask.getNumOperands();
SmallVector<SDOperand, 8> MaskVec;
/// CommuteVectorShuffleMask - Change values in a shuffle permute mask assuming
/// the two vector operands have swapped position.
static
SDOperand CommuteVectorShuffleMask(SDOperand Mask, SelectionDAG &DAG) {
- MVT::ValueType MaskVT = Mask.getValueType();
- MVT::ValueType EltVT = MVT::getVectorElementType(MaskVT);
+ MVT MaskVT = Mask.getValueType();
+ MVT EltVT = MaskVT.getVectorElementType();
unsigned NumElems = Mask.getNumOperands();
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i != NumElems; ++i) {
/// getZeroVector - Returns a vector of specified type with all zero elements.
///
-static SDOperand getZeroVector(MVT::ValueType VT, SelectionDAG &DAG) {
- assert(MVT::isVector(VT) && "Expected a vector type");
+static SDOperand getZeroVector(MVT VT, bool HasSSE2, SelectionDAG &DAG) {
+ assert(VT.isVector() && "Expected a vector type");
// Always build zero vectors as <4 x i32> or <2 x i32> bitcasted to their dest
// type. This ensures they get CSE'd.
- SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
SDOperand Vec;
- if (MVT::getSizeInBits(VT) == 64) // MMX
+ if (VT.getSizeInBits() == 64) { // MMX
+ SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
- else // SSE
+ } else if (HasSSE2) { // SSE2
+ SDOperand Cst = DAG.getTargetConstant(0, MVT::i32);
Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
+ } else { // SSE1
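+    // Without SSE2 there are no 128-bit integer vector operations, so build
+    // the zero as <4 x float> of +0.0 (an all-zero bit pattern) and bitcast
+    // it to the requested type.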
+ SDOperand Cst = DAG.getTargetConstantFP(+0.0, MVT::f32);
+ Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4f32, Cst, Cst, Cst, Cst);
+ }
return DAG.getNode(ISD::BIT_CONVERT, VT, Vec);
}
/// getOnesVector - Returns a vector of specified type with all bits set.
///
-static SDOperand getOnesVector(MVT::ValueType VT, SelectionDAG &DAG) {
- assert(MVT::isVector(VT) && "Expected a vector type");
+static SDOperand getOnesVector(MVT VT, SelectionDAG &DAG) {
+ assert(VT.isVector() && "Expected a vector type");
// Always build ones vectors as <4 x i32> or <2 x i32> bitcasted to their dest
// type. This ensures they get CSE'd.
SDOperand Cst = DAG.getTargetConstant(~0U, MVT::i32);
SDOperand Vec;
- if (MVT::getSizeInBits(VT) == 64) // MMX
+ if (VT.getSizeInBits() == 64) // MMX
Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v2i32, Cst, Cst);
else // SSE
Vec = DAG.getNode(ISD::BUILD_VECTOR, MVT::v4i32, Cst, Cst, Cst, Cst);
/// getMOVLMask - Returns a vector_shuffle mask for an movs{s|d}, movd
/// operation of specified width.
static SDOperand getMOVLMask(unsigned NumElems, SelectionDAG &DAG) {
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
+ MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+ MVT BaseVT = MaskVT.getVectorElementType();
SmallVector<SDOperand, 8> MaskVec;
MaskVec.push_back(DAG.getConstant(NumElems, BaseVT));
/// getUnpacklMask - Returns a vector_shuffle mask for an unpackl operation
/// of specified width.
static SDOperand getUnpacklMask(unsigned NumElems, SelectionDAG &DAG) {
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
+ MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+ MVT BaseVT = MaskVT.getVectorElementType();
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0, e = NumElems/2; i != e; ++i) {
MaskVec.push_back(DAG.getConstant(i, BaseVT));
/// getUnpackhMask - Returns a vector_shuffle mask for an unpackh operation
/// of specified width.
static SDOperand getUnpackhMask(unsigned NumElems, SelectionDAG &DAG) {
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
+ MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+ MVT BaseVT = MaskVT.getVectorElementType();
unsigned Half = NumElems/2;
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i != Half; ++i) {
/// getSwapEltZeroMask - Returns a vector_shuffle mask that swaps element #0 of
/// a vector with the element at index DestElt, leaving the rest of the
/// elements in place.
static SDOperand getSwapEltZeroMask(unsigned NumElems, unsigned DestElt,
SelectionDAG &DAG) {
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType BaseVT = MVT::getVectorElementType(MaskVT);
+ MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+ MVT BaseVT = MaskVT.getVectorElementType();
SmallVector<SDOperand, 8> MaskVec;
// Element #0 of the result gets the elt we are replacing.
MaskVec.push_back(DAG.getConstant(DestElt, BaseVT));
/// PromoteSplat - Promote a splat of v4f32, v8i16 or v16i8 to v4i32.
static SDOperand PromoteSplat(SDOperand Op, SelectionDAG &DAG, bool HasSSE2) {
- MVT::ValueType PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32;
- MVT::ValueType VT = Op.getValueType();
+ MVT PVT = HasSSE2 ? MVT::v4i32 : MVT::v4f32;
+ MVT VT = Op.getValueType();
if (PVT == VT)
return Op;
SDOperand V1 = Op.getOperand(0);
V1 = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V1, Mask);
NumElems >>= 1;
}
- Mask = getZeroVector(MVT::v4i32, DAG);
+ Mask = getZeroVector(MVT::v4i32, true, DAG);
}
V1 = DAG.getNode(ISD::BIT_CONVERT, PVT, V1);
/// getShuffleVectorZeroOrUndef - Return a vector_shuffle of the specified
/// vector of zero or undef vector. This produces a shuffle where the low
/// element of V2 is swizzled into the zero/undef vector, landing at element
/// Idx. This produces a shuffle mask like 4,1,2,3 (idx=0) or 0,1,2,4 (idx=3).
static SDOperand getShuffleVectorZeroOrUndef(SDOperand V2, unsigned Idx,
- bool isZero, SelectionDAG &DAG) {
- MVT::ValueType VT = V2.getValueType();
- SDOperand V1 = isZero ? getZeroVector(VT, DAG) : DAG.getNode(ISD::UNDEF, VT);
- unsigned NumElems = MVT::getVectorNumElements(V2.getValueType());
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
+ bool isZero, bool HasSSE2,
+ SelectionDAG &DAG) {
+ MVT VT = V2.getValueType();
+ SDOperand V1 = isZero
+ ? getZeroVector(VT, HasSSE2, DAG) : DAG.getNode(ISD::UNDEF, VT);
+ unsigned NumElems = V2.getValueType().getVectorNumElements();
+ MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+ MVT EVT = MaskVT.getVectorElementType();
SmallVector<SDOperand, 16> MaskVec;
for (unsigned i = 0; i != NumElems; ++i)
if (i == Idx) // If this is the insertion idx, put the low elt of V2 here.
return DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2, Mask);
}
+/// getNumOfConsecutiveZeros - Return the number of elements in a result of
+/// a shuffle that is zero.
+static
+unsigned getNumOfConsecutiveZeros(SDOperand Op, SDOperand Mask,
+ unsigned NumElems, bool Low,
+ SelectionDAG &DAG) {
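+  // For example, for a v4i32 shuffle of V1 with an all-zeros V2 and mask
+  // <4,0,1,2>, mask element 0 reads element 0 of the zero vector, so a scan
+  // from the low end (Low == true) finds one leading zero and returns 1.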
+ unsigned NumZeros = 0;
+ for (unsigned i = 0; i < NumElems; ++i) {
+ SDOperand Idx = Mask.getOperand(Low ? i : NumElems-i-1);
+ if (Idx.getOpcode() == ISD::UNDEF) {
+ ++NumZeros;
+ continue;
+ }
+ unsigned Index = cast<ConstantSDNode>(Idx)->getValue();
+ SDOperand Elt = DAG.getShuffleScalarElt(Op.Val, Index);
+ if (Elt.Val && isZeroNode(Elt))
+ ++NumZeros;
+ else
+ break;
+ }
+ return NumZeros;
+}
+
+/// isVectorShift - Returns true if the shuffle can be implemented as a
+/// logical left or right shift of a vector.
+static bool isVectorShift(SDOperand Op, SDOperand Mask, SelectionDAG &DAG,
+ bool &isLeft, SDOperand &ShVal, unsigned &ShAmt) {
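+  // e.g. a v4i32 shuffle of V1 with an all-zeros V2 and mask <4,0,1,2> moves
+  // every element of V1 one slot higher, so this returns isLeft = true,
+  // ShVal = V1, ShAmt = 1. The shift amount is in elements; callers scale it
+  // by the element size in bits before building the shift node.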
+ unsigned NumElems = Mask.getNumOperands();
+
+ isLeft = true;
+ unsigned NumZeros= getNumOfConsecutiveZeros(Op, Mask, NumElems, true, DAG);
+ if (!NumZeros) {
+ isLeft = false;
+ NumZeros = getNumOfConsecutiveZeros(Op, Mask, NumElems, false, DAG);
+ if (!NumZeros)
+ return false;
+ }
+
+ bool SeenV1 = false;
+ bool SeenV2 = false;
+ for (unsigned i = NumZeros; i < NumElems; ++i) {
+ unsigned Val = isLeft ? (i - NumZeros) : i;
+ SDOperand Idx = Mask.getOperand(isLeft ? i : (i - NumZeros));
+ if (Idx.getOpcode() == ISD::UNDEF)
+ continue;
+ unsigned Index = cast<ConstantSDNode>(Idx)->getValue();
+ if (Index < NumElems)
+ SeenV1 = true;
+ else {
+ Index -= NumElems;
+ SeenV2 = true;
+ }
+ if (Index != Val)
+ return false;
+ }
+ if (SeenV1 && SeenV2)
+ return false;
+
+ ShVal = SeenV1 ? Op.getOperand(0) : Op.getOperand(1);
+ ShAmt = NumZeros;
+ return true;
+}
+
+
/// LowerBuildVectorv16i8 - Custom lower build_vector of v16i8.
///
static SDOperand LowerBuildVectorv16i8(SDOperand Op, unsigned NonZeros,
bool ThisIsNonZero = (NonZeros & (1 << i)) != 0;
if (ThisIsNonZero && First) {
if (NumZero)
- V = getZeroVector(MVT::v8i16, DAG);
+ V = getZeroVector(MVT::v8i16, true, DAG);
else
V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
First = false;
if (isNonZero) {
if (First) {
if (NumZero)
- V = getZeroVector(MVT::v8i16, DAG);
+ V = getZeroVector(MVT::v8i16, true, DAG);
else
V = DAG.getNode(ISD::UNDEF, MVT::v8i16);
First = false;
return V;
}
+/// getVShift - Return a vector logical shift node.
+///
+static SDOperand getVShift(bool isLeft, MVT VT, SDOperand SrcOp,
+ unsigned NumBits, SelectionDAG &DAG,
+ const TargetLowering &TLI) {
+ bool isMMX = VT.getSizeInBits() == 64;
+ MVT ShVT = isMMX ? MVT::v1i64 : MVT::v2i64;
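+  // The shift is performed on the full 64-bit (MMX) or 128-bit (SSE) register,
+  // so bitcast the source to a wide integer type first.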
+ unsigned Opc = isLeft ? X86ISD::VSHL : X86ISD::VSRL;
+ SrcOp = DAG.getNode(ISD::BIT_CONVERT, ShVT, SrcOp);
+ return DAG.getNode(ISD::BIT_CONVERT, VT,
+ DAG.getNode(Opc, ShVT, SrcOp,
+ DAG.getConstant(NumBits, TLI.getShiftAmountTy())));
+}
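+// getVShift is used both when lowering build_vectors of the form <0, x> and
+// when a shuffle is recognized by isVectorShift as a whole-register shift.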
+
SDOperand
X86TargetLowering::LowerBUILD_VECTOR(SDOperand Op, SelectionDAG &DAG) {
// All zero's are handled with pxor, all one's are handled with pcmpeqd.
if (ISD::isBuildVectorAllOnes(Op.Val))
return getOnesVector(Op.getValueType(), DAG);
- return getZeroVector(Op.getValueType(), DAG);
+ return getZeroVector(Op.getValueType(), Subtarget->hasSSE2(), DAG);
}
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType EVT = MVT::getVectorElementType(VT);
- unsigned EVTBits = MVT::getSizeInBits(EVT);
+ MVT VT = Op.getValueType();
+ MVT EVT = VT.getVectorElementType();
+ unsigned EVTBits = EVT.getSizeInBits();
unsigned NumElems = Op.getNumOperands();
unsigned NumZero = 0;
(!IsAllConstants || Idx == 0)) {
if (DAG.MaskedValueIsZero(Item, APInt::getBitsSet(64, 32, 64))) {
// Handle MMX and SSE both.
- MVT::ValueType VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
- MVT::ValueType VecElts = VT == MVT::v2i64 ? 4 : 2;
+ MVT VecVT = VT == MVT::v2i64 ? MVT::v4i32 : MVT::v2i32;
+ unsigned VecElts = VT == MVT::v2i64 ? 4 : 2;
// Truncate the value (which may itself be a constant) to i32, and
// convert it to a vector with movd (S2V+shuffle to zero extend).
Item = DAG.getNode(ISD::TRUNCATE, MVT::i32, Item);
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VecVT, Item);
- Item = getShuffleVectorZeroOrUndef(Item, 0, true, DAG);
+ Item = getShuffleVectorZeroOrUndef(Item, 0, true,
+ Subtarget->hasSSE2(), DAG);
// Now we have our 32-bit value zero extended in the low element of
// a vector. If Idx != 0, swizzle it into place.
(EVT != MVT::i64 || Subtarget->is64Bit())) {
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
// Turn it into a MOVL (i.e. movss, movsd, or movd) to a zero vector.
- return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG);
+ return getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
+ Subtarget->hasSSE2(), DAG);
+ }
+
+ // Is it a vector logical left shift?
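+  // A build_vector of <0, x> can be formed by placing x in element 0 with
+  // scalar_to_vector and shifting the register left by half its width, which
+  // moves x into element 1 and zero-fills element 0.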
+ if (NumElems == 2 && Idx == 1 &&
+ isZeroNode(Op.getOperand(0)) && !isZeroNode(Op.getOperand(1))) {
+ unsigned NumBits = VT.getSizeInBits();
+ return getVShift(true, VT,
+ DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(1)),
+ NumBits/2, DAG, *this);
}
if (IsAllConstants) // Otherwise, it's better to do a constpool load.
  return SDOperand();
Item = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Item);
// Turn it into a shuffle of zero and zero-extended scalar to vector.
- Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0, DAG);
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
+ Item = getShuffleVectorZeroOrUndef(Item, 0, NumZero > 0,
+ Subtarget->hasSSE2(), DAG);
+ MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+ MVT MaskEVT = MaskVT.getVectorElementType();
SmallVector<SDOperand, 8> MaskVec;
for (unsigned i = 0; i < NumElems; i++)
MaskVec.push_back(DAG.getConstant((i == Idx) ? 0 : 1, MaskEVT));
unsigned Idx = CountTrailingZeros_32(NonZeros);
SDOperand V2 = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT,
Op.getOperand(Idx));
- return getShuffleVectorZeroOrUndef(V2, Idx, true, DAG);
+ return getShuffleVectorZeroOrUndef(V2, Idx, true,
+ Subtarget->hasSSE2(), DAG);
}
return SDOperand();
}
for (unsigned i = 0; i < 4; ++i) {
bool isZero = !(NonZeros & (1 << i));
if (isZero)
- V[i] = getZeroVector(VT, DAG);
+ V[i] = getZeroVector(VT, Subtarget->hasSSE2(), DAG);
else
V[i] = DAG.getNode(ISD::SCALAR_TO_VECTOR, VT, Op.getOperand(i));
}
}
}
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NumElems);
- MVT::ValueType EVT = MVT::getVectorElementType(MaskVT);
+ MVT MaskVT = MVT::getIntVectorWithNumElements(NumElems);
+ MVT EVT = MaskVT.getVectorElementType();
SmallVector<SDOperand, 8> MaskVec;
bool Reverse = (NonZeros & 0x3) == 2;
for (unsigned i = 0; i < 2; ++i)
SDOperand PermMask, SelectionDAG &DAG,
TargetLowering &TLI) {
SDOperand NewV;
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(8);
- MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
- MVT::ValueType PtrVT = TLI.getPointerTy();
+ MVT MaskVT = MVT::getIntVectorWithNumElements(8);
+ MVT MaskEVT = MaskVT.getVectorElementType();
+ MVT PtrVT = TLI.getPointerTy();
SmallVector<SDOperand, 8> MaskElts(PermMask.Val->op_begin(),
PermMask.Val->op_end());
continue;
SDOperand Elt = MaskElts[i];
unsigned EltIdx = cast<ConstantSDNode>(Elt)->getValue();
- if (EltIdx == i)
- continue;
SDOperand ExtOp = (EltIdx < 8)
? DAG.getNode(ISD::EXTRACT_VECTOR_ELT, MVT::i16, V1,
DAG.getConstant(EltIdx, PtrVT))
/// RewriteAsNarrowerShuffle - Try rewriting v8i16 and v16i8 shuffles as 4-wide
/// ones, or rewriting v4i32 / v4f32 shuffles as 2-wide ones, if possible. This
/// can be done when every pair / quad of shuffle mask elements point to
/// elements in the right sequence, e.g.
/// vector_shuffle <>, <>, < 3, 4, | 10, 11, | 0, 1, | 14, 15>
static
SDOperand RewriteAsNarrowerShuffle(SDOperand V1, SDOperand V2,
- MVT::ValueType VT,
+ MVT VT,
SDOperand PermMask, SelectionDAG &DAG,
TargetLowering &TLI) {
unsigned NumElems = PermMask.getNumOperands();
unsigned NewWidth = (NumElems == 4) ? 2 : 4;
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
- MVT::ValueType NewVT = MaskVT;
- switch (VT) {
+ MVT MaskVT = MVT::getIntVectorWithNumElements(NewWidth);
+ MVT NewVT = MaskVT;
+ switch (VT.getSimpleVT()) {
+ default: assert(false && "Unexpected!");
case MVT::v4f32: NewVT = MVT::v2f64; break;
case MVT::v4i32: NewVT = MVT::v2i64; break;
case MVT::v8i16: NewVT = MVT::v4i32; break;
case MVT::v16i8: NewVT = MVT::v4i32; break;
- default: assert(false && "Unexpected!");
}
if (NewWidth == 2) {
- if (MVT::isInteger(VT))
+ if (VT.isInteger())
NewVT = MVT::v2i64;
else
NewVT = MVT::v2f64;
/// getVZextMovL - Return a zero-extending vector move low node.
///
-static SDOperand getVZextMovL(MVT::ValueType VT, MVT::ValueType OpVT,
- SDOperand SrcOp, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+static SDOperand getVZextMovL(MVT VT, MVT OpVT,
+ SDOperand SrcOp, SelectionDAG &DAG,
+ const X86Subtarget *Subtarget) {
if (VT == MVT::v2f64 || VT == MVT::v4f32) {
LoadSDNode *LD = NULL;
if (!isScalarLoadToVector(SrcOp.Val, &LD))
if (!LD) {
// movssrr and movsdrr do not clear top bits. Try to use movd, movq
// instead.
- MVT::ValueType EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
+ MVT EVT = (OpVT == MVT::v2f64) ? MVT::i64 : MVT::i32;
if ((EVT != MVT::i64 || Subtarget->is64Bit()) &&
SrcOp.getOpcode() == ISD::SCALAR_TO_VECTOR &&
SrcOp.getOperand(0).getOpcode() == ISD::BIT_CONVERT &&
SDOperand V1 = Op.getOperand(0);
SDOperand V2 = Op.getOperand(1);
SDOperand PermMask = Op.getOperand(2);
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
unsigned NumElems = PermMask.getNumOperands();
- bool isMMX = MVT::getSizeInBits(VT) == 64;
+ bool isMMX = VT.getSizeInBits() == 64;
bool V1IsUndef = V1.getOpcode() == ISD::UNDEF;
bool V2IsUndef = V2.getOpcode() == ISD::UNDEF;
bool V1IsSplat = false;
return DAG.getNode(ISD::UNDEF, VT);
if (isZeroShuffle(Op.Val))
- return getZeroVector(VT, DAG);
+ return getZeroVector(VT, Subtarget->hasSSE2(), DAG);
if (isIdentityMask(PermMask.Val))
return V1;
}
}
+ // Check if this can be converted into a logical shift.
+ bool isLeft = false;
+ unsigned ShAmt = 0;
+ SDOperand ShVal;
+ bool isShift = isVectorShift(Op, PermMask, DAG, isLeft, ShVal, ShAmt);
+ if (isShift && ShVal.hasOneUse()) {
+ // If the shifted value has multiple uses, it may be cheaper to use
+ // v_set0 + movlhps or movhlps, etc.
+ MVT EVT = VT.getVectorElementType();
+ ShAmt *= EVT.getSizeInBits();
+ return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this);
+ }
+
if (X86::isMOVLMask(PermMask.Val)) {
if (V1IsUndef)
return V2;
ShouldXformToMOVLP(V1.Val, V2.Val, PermMask.Val))
return CommuteVectorShuffle(Op, V1, V2, PermMask, DAG);
+ if (isShift) {
+ // No better options. Use a vshl / vsrl.
+ MVT EVT = VT.getVectorElementType();
+ ShAmt *= EVT.getSizeInBits();
+ return getVShift(isLeft, VT, ShVal, ShAmt, DAG, *this);
+ }
+
bool Commuted = false;
// FIXME: This should also accept a bitcast of a splat? Be careful, not
// 1,1,1,1 -> v8i16 though.
(X86::isPSHUFDMask(PermMask.Val) ||
X86::isPSHUFHWMask(PermMask.Val) ||
X86::isPSHUFLWMask(PermMask.Val))) {
- MVT::ValueType RVT = VT;
+ MVT RVT = VT;
if (VT == MVT::v4f32) {
RVT = MVT::v4i32;
Op = DAG.getNode(ISD::VECTOR_SHUFFLE, RVT,
// Handle all 4 wide cases with a number of shuffles.
if (NumElems == 4 && !isMMX) {
// Don't do this for MMX.
- MVT::ValueType MaskVT = PermMask.getValueType();
- MVT::ValueType MaskEVT = MVT::getVectorElementType(MaskVT);
+ MVT MaskVT = PermMask.getValueType();
+ MVT MaskEVT = MaskVT.getVectorElementType();
SmallVector<std::pair<int, int>, 8> Locs;
Locs.reserve(NumElems);
SmallVector<SDOperand, 8> Mask1(NumElems,
SDOperand
X86TargetLowering::LowerEXTRACT_VECTOR_ELT_SSE4(SDOperand Op,
SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- if (MVT::getSizeInBits(VT) == 8) {
+ MVT VT = Op.getValueType();
+ if (VT.getSizeInBits() == 8) {
SDOperand Extract = DAG.getNode(X86ISD::PEXTRB, MVT::i32,
Op.getOperand(0), Op.getOperand(1));
SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, VT, Assert);
- } else if (MVT::getSizeInBits(VT) == 16) {
+ } else if (VT.getSizeInBits() == 16) {
SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, MVT::i32,
Op.getOperand(0), Op.getOperand(1));
SDOperand Assert = DAG.getNode(ISD::AssertZext, MVT::i32, Extract,
return Res;
}
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
// TODO: handle v16i8.
- if (MVT::getSizeInBits(VT) == 16) {
+ if (VT.getSizeInBits() == 16) {
SDOperand Vec = Op.getOperand(0);
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
if (Idx == 0)
DAG.getNode(ISD::BIT_CONVERT, MVT::v4i32, Vec),
Op.getOperand(1)));
// Transform it so it match pextrw which produces a 32-bit result.
- MVT::ValueType EVT = (MVT::ValueType)(VT+1);
+ MVT EVT = (MVT::SimpleValueType)(VT.getSimpleVT()+1);
SDOperand Extract = DAG.getNode(X86ISD::PEXTRW, EVT,
Op.getOperand(0), Op.getOperand(1));
SDOperand Assert = DAG.getNode(ISD::AssertZext, EVT, Extract,
DAG.getValueType(VT));
return DAG.getNode(ISD::TRUNCATE, VT, Assert);
- } else if (MVT::getSizeInBits(VT) == 32) {
+ } else if (VT.getSizeInBits() == 32) {
unsigned Idx = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
if (Idx == 0)
return Op;
// SHUFPS the element to the lowest double word, then movss.
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
+ MVT MaskVT = MVT::getIntVectorWithNumElements(4);
SmallVector<SDOperand, 8> IdxVec;
IdxVec.
- push_back(DAG.getConstant(Idx, MVT::getVectorElementType(MaskVT)));
+ push_back(DAG.getConstant(Idx, MaskVT.getVectorElementType()));
IdxVec.
- push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+ push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
IdxVec.
- push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+ push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
IdxVec.
- push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+ push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
&IdxVec[0], IdxVec.size());
SDOperand Vec = Op.getOperand(0);
Vec, DAG.getNode(ISD::UNDEF, Vec.getValueType()), Mask);
return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, VT, Vec,
DAG.getIntPtrConstant(0));
- } else if (MVT::getSizeInBits(VT) == 64) {
+ } else if (VT.getSizeInBits() == 64) {
// FIXME: .td only matches this for <2 x f64>, not <2 x i64> on 32b
// FIXME: seems like this should be unnecessary if mov{h,l}pd were taught
// to match extract_elt for f64.
// UNPCKHPD the element to the lowest double word, then movsd.
// Note if the lower 64 bits of the result of the UNPCKHPD is then stored
// to a f64mem, the whole operation is folded into a single MOVHPDmr.
- MVT::ValueType MaskVT = MVT::getIntVectorWithNumElements(4);
+ MVT MaskVT = MVT::getIntVectorWithNumElements(4);
SmallVector<SDOperand, 8> IdxVec;
- IdxVec.push_back(DAG.getConstant(1, MVT::getVectorElementType(MaskVT)));
+ IdxVec.push_back(DAG.getConstant(1, MaskVT.getVectorElementType()));
IdxVec.
- push_back(DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(MaskVT)));
+ push_back(DAG.getNode(ISD::UNDEF, MaskVT.getVectorElementType()));
SDOperand Mask = DAG.getNode(ISD::BUILD_VECTOR, MaskVT,
&IdxVec[0], IdxVec.size());
SDOperand Vec = Op.getOperand(0);
SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT_SSE4(SDOperand Op, SelectionDAG &DAG){
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType EVT = MVT::getVectorElementType(VT);
+ MVT VT = Op.getValueType();
+ MVT EVT = VT.getVectorElementType();
SDOperand N0 = Op.getOperand(0);
SDOperand N1 = Op.getOperand(1);
SDOperand N2 = Op.getOperand(2);
- if ((MVT::getSizeInBits(EVT) == 8) || (MVT::getSizeInBits(EVT) == 16)) {
- unsigned Opc = (MVT::getSizeInBits(EVT) == 8) ? X86ISD::PINSRB
+ if ((EVT.getSizeInBits() == 8) || (EVT.getSizeInBits() == 16)) {
+ unsigned Opc = (EVT.getSizeInBits() == 8) ? X86ISD::PINSRB
: X86ISD::PINSRW;
// Transform it so it match pinsr{b,w} which expects a GR32 as its second
// argument.
SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType EVT = MVT::getVectorElementType(VT);
+ MVT VT = Op.getValueType();
+ MVT EVT = VT.getVectorElementType();
if (Subtarget->hasSSE41())
return LowerINSERT_VECTOR_ELT_SSE4(Op, DAG);
SDOperand N1 = Op.getOperand(1);
SDOperand N2 = Op.getOperand(2);
- if (MVT::getSizeInBits(EVT) == 16) {
+ if (EVT.getSizeInBits() == 16) {
// Transform it so it match pinsrw which expects a 16-bit value in a GR32
// as its second argument.
if (N1.getValueType() != MVT::i32)
SDOperand
X86TargetLowering::LowerSCALAR_TO_VECTOR(SDOperand Op, SelectionDAG &DAG) {
SDOperand AnyExt = DAG.getNode(ISD::ANY_EXTEND, MVT::i32, Op.getOperand(0));
- MVT::ValueType VT = MVT::v2i32;
- switch (Op.getValueType()) {
+ MVT VT = MVT::v2i32;
+ switch (Op.getValueType().getSimpleVT()) {
default: break;
case MVT::v16i8:
case MVT::v8i16:
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDOperand Result = DAG.getTargetGlobalAddress(GV, getPointerTy());
- // If it's a debug information descriptor, don't mess with it.
- if (DAG.isVerifiedDebugInfoDesc(Op))
- return Result;
Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(), Result);
// With PIC, the address is actually $g + Offset.
if (getTargetMachine().getRelocationModel() == Reloc::PIC_ &&
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 32 bit
static SDOperand
LowerToTLSGeneralDynamicModel32(GlobalAddressSDNode *GA, SelectionDAG &DAG,
- const MVT::ValueType PtrVT) {
+ const MVT PtrVT) {
SDOperand InFlag;
SDOperand Chain = DAG.getCopyToReg(DAG.getEntryNode(), X86::EBX,
DAG.getNode(X86ISD::GlobalBaseReg,
// Lower ISD::GlobalTLSAddress using the "general dynamic" model, 64 bit
static SDOperand
LowerToTLSGeneralDynamicModel64(GlobalAddressSDNode *GA, SelectionDAG &DAG,
- const MVT::ValueType PtrVT) {
+ const MVT PtrVT) {
SDOperand InFlag, Chain;
// emit leaq symbol@TLSGD(%rip), %rdi
// Lower ISD::GlobalTLSAddress using the "initial exec" (for no-pic) or
// "local exec" model.
-static SDOperand
-LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
- const MVT::ValueType PtrVT) {
+static SDOperand LowerToTLSExecModel(GlobalAddressSDNode *GA, SelectionDAG &DAG,
+ const MVT PtrVT) {
// Get the Thread Pointer
SDOperand ThreadPointer = DAG.getNode(X86ISD::THREAD_POINTER, PtrVT);
// emit "addl x@ntpoff,%eax" (local exec) or "addl x@indntpoff,%eax" (initial
/// LowerShift - Lower SRA_PARTS and friends, which return two i32 values and
/// take a 2 x i32 value to shift plus a shift amount.
SDOperand X86TargetLowering::LowerShift(SDOperand Op, SelectionDAG &DAG) {
assert(Op.getNumOperands() == 3 && "Not a double-shift!");
- MVT::ValueType VT = Op.getValueType();
- unsigned VTBits = MVT::getSizeInBits(VT);
+ MVT VT = Op.getValueType();
+ unsigned VTBits = VT.getSizeInBits();
bool isSRA = Op.getOpcode() == ISD::SRA_PARTS;
SDOperand ShOpLo = Op.getOperand(0);
SDOperand ShOpHi = Op.getOperand(1);
Tmp3 = DAG.getNode(isSRA ? ISD::SRA : ISD::SRL, VT, ShOpHi, ShAmt);
}
- const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
+ const MVT *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
DAG.getConstant(VTBits, MVT::i8));
SDOperand Cond = DAG.getNode(X86ISD::CMP, VT,
}
SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType SrcVT = Op.getOperand(0).getValueType();
- assert(SrcVT <= MVT::i64 && SrcVT >= MVT::i16 &&
+ MVT SrcVT = Op.getOperand(0).getValueType();
+ assert(SrcVT.getSimpleVT() <= MVT::i64 && SrcVT.getSimpleVT() >= MVT::i16 &&
"Unknown SINT_TO_FP to lower!");
// These are really Legal; caller falls through into that case.
Subtarget->is64Bit())
return SDOperand();
- unsigned Size = MVT::getSizeInBits(SrcVT)/8;
+ unsigned Size = SrcVT.getSizeInBits()/8;
MachineFunction &MF = DAG.getMachineFunction();
int SSFI = MF.getFrameInfo()->CreateStackObject(Size, Size);
SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
std::pair<SDOperand,SDOperand> X86TargetLowering::
FP_TO_SINTHelper(SDOperand Op, SelectionDAG &DAG) {
- assert(Op.getValueType() <= MVT::i64 && Op.getValueType() >= MVT::i16 &&
+ assert(Op.getValueType().getSimpleVT() <= MVT::i64 &&
+ Op.getValueType().getSimpleVT() >= MVT::i16 &&
"Unknown FP_TO_SINT to lower!");
// These are really Legal.
// We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
// stack slot.
MachineFunction &MF = DAG.getMachineFunction();
- unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
+ unsigned MemSize = Op.getValueType().getSizeInBits()/8;
int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
unsigned Opc;
- switch (Op.getValueType()) {
+ switch (Op.getValueType().getSimpleVT()) {
default: assert(0 && "Invalid FP_TO_SINT to lower!");
case MVT::i16: Opc = X86ISD::FP_TO_INT16_IN_MEM; break;
case MVT::i32: Opc = X86ISD::FP_TO_INT32_IN_MEM; break;
}
SDOperand X86TargetLowering::LowerFABS(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType EltVT = VT;
- if (MVT::isVector(VT))
- EltVT = MVT::getVectorElementType(VT);
+ MVT VT = Op.getValueType();
+ MVT EltVT = VT;
+ if (VT.isVector())
+ EltVT = VT.getVectorElementType();
std::vector<Constant*> CV;
if (EltVT == MVT::f64) {
Constant *C = ConstantFP::get(APFloat(APInt(64, ~(1ULL << 63))));
}
SDOperand X86TargetLowering::LowerFNEG(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType EltVT = VT;
+ MVT VT = Op.getValueType();
+ MVT EltVT = VT;
unsigned EltNum = 1;
- if (MVT::isVector(VT)) {
- EltVT = MVT::getVectorElementType(VT);
- EltNum = MVT::getVectorNumElements(VT);
+ if (VT.isVector()) {
+ EltVT = VT.getVectorElementType();
+ EltNum = VT.getVectorNumElements();
}
std::vector<Constant*> CV;
if (EltVT == MVT::f64) {
SDOperand Mask = DAG.getLoad(VT, DAG.getEntryNode(), CPIdx,
PseudoSourceValue::getConstantPool(), 0,
false, 16);
- if (MVT::isVector(VT)) {
+ if (VT.isVector()) {
return DAG.getNode(ISD::BIT_CONVERT, VT,
DAG.getNode(ISD::XOR, MVT::v2i64,
DAG.getNode(ISD::BIT_CONVERT, MVT::v2i64, Op.getOperand(0)),
SDOperand X86TargetLowering::LowerFCOPYSIGN(SDOperand Op, SelectionDAG &DAG) {
SDOperand Op0 = Op.getOperand(0);
SDOperand Op1 = Op.getOperand(1);
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType SrcVT = Op1.getValueType();
+ MVT VT = Op.getValueType();
+ MVT SrcVT = Op1.getValueType();
// If second operand is smaller, extend it first.
- if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
+ if (SrcVT.bitsLT(VT)) {
Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
SrcVT = VT;
}
// And if it is bigger, shrink it first.
- if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
+ if (SrcVT.bitsGT(VT)) {
Op1 = DAG.getNode(ISD::FP_ROUND, VT, Op1, DAG.getIntPtrConstant(1));
SrcVT = VT;
}
SDOperand SignBit = DAG.getNode(X86ISD::FAND, SrcVT, Op1, Mask1);
// Shift sign bit right or left if the two operands have different types.
- if (MVT::getSizeInBits(SrcVT) > MVT::getSizeInBits(VT)) {
+ if (SrcVT.bitsGT(VT)) {
// Op0 is MVT::f32, Op1 is MVT::f64.
SignBit = DAG.getNode(ISD::SCALAR_TO_VECTOR, MVT::v2f64, SignBit);
SignBit = DAG.getNode(X86ISD::FSRL, MVT::v2f64, SignBit,
SDOperand Op1 = Op.getOperand(1);
SDOperand CC = Op.getOperand(2);
ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
- bool isFP = MVT::isFloatingPoint(Op.getOperand(1).getValueType());
+ bool isFP = Op.getOperand(1).getValueType().isFloatingPoint();
unsigned X86CC;
if (translateX86CC(cast<CondCodeSDNode>(CC)->get(), isFP, X86CC,
SDOperand Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
bool IllegalFPCMov = false;
- if (MVT::isFloatingPoint(VT) && !MVT::isVector(VT) &&
+ if (VT.isFloatingPoint() && !VT.isVector() &&
!isScalarFPTypeInSSEReg(VT)) // FPStack?
IllegalFPCMov = !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
Cond= DAG.getNode(X86ISD::CMP, MVT::i32, Cond, DAG.getConstant(0, MVT::i8));
}
-  const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(),
-                                                    MVT::Flag);
+  const MVT *VTs = DAG.getNodeValueTypes(Op.getValueType(), MVT::Flag);
SmallVector<SDOperand, 4> Ops;
// X86ISD::CMOV means set the result (which is operand 1) to the RHS if
SelectionDAG &DAG) {
assert(Subtarget->isTargetCygMing() &&
"This should be used only on Cygwin/Mingw targets");
-
+
// Get the inputs.
SDOperand Chain = Op.getOperand(0);
SDOperand Size = Op.getOperand(1);
// FIXME: Ensure alignment here
SDOperand Flag;
-
- MVT::ValueType IntPtr = getPointerTy();
- MVT::ValueType SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
+
+ MVT IntPtr = getPointerTy();
+ MVT SPTy = Subtarget->is64Bit() ? MVT::i64 : MVT::i32;
+
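+  // Bracket the call to _alloca in a call sequence so that frame lowering and
+  // the scheduler treat it like an ordinary call.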
+ Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(0));
Chain = DAG.getCopyToReg(Chain, X86::EAX, Size, Flag);
Flag = Chain.getValue(1);
SDOperand Ops[] = { Chain,
DAG.getTargetExternalSymbol("_alloca", IntPtr),
DAG.getRegister(X86::EAX, IntPtr),
+ DAG.getRegister(X86StackPtr, SPTy),
Flag };
- Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 4);
+ Chain = DAG.getNode(X86ISD::CALL, NodeTys, Ops, 5);
Flag = Chain.getValue(1);
+ Chain = DAG.getCALLSEQ_END(Chain,
+ DAG.getIntPtrConstant(0),
+ DAG.getIntPtrConstant(0),
+ Flag);
+
Chain = DAG.getCopyFromReg(Chain, X86StackPtr, SPTy).getValue(1);
-
- std::vector<MVT::ValueType> Tys;
+
+ std::vector<MVT> Tys;
Tys.push_back(SPTy);
Tys.push_back(MVT::Other);
SDOperand Ops1[2] = { Chain.getValue(0), Chain };
ConstantSDNode *V = dyn_cast<ConstantSDNode>(Src);
if (const char *bzeroEntry =
V && V->isNullValue() ? Subtarget->getBZeroEntry() : 0) {
- MVT::ValueType IntPtr = getPointerTy();
+ MVT IntPtr = getPointerTy();
const Type *IntPtrTy = getTargetData()->getIntPtrType();
TargetLowering::ArgListTy Args;
TargetLowering::ArgListEntry Entry;
uint64_t SizeVal = ConstantSize->getValue();
SDOperand InFlag(0, 0);
- MVT::ValueType AVT;
+ MVT AVT;
SDOperand Count;
ConstantSDNode *ValC = dyn_cast<ConstantSDNode>(Src);
unsigned BytesLeft = 0;
break;
}
- if (AVT > MVT::i8) {
- unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
+ if (AVT.bitsGT(MVT::i8)) {
+ unsigned UBytes = AVT.getSizeInBits() / 8;
Count = DAG.getIntPtrConstant(SizeVal / UBytes);
BytesLeft = SizeVal % UBytes;
}
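+  // e.g. a 10-byte memset with AVT == MVT::i32 runs the rep;stos loop with a
+  // count of 2 and leaves BytesLeft == 2 to be stored separately below.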
if (TwoRepStos) {
InFlag = Chain.getValue(1);
Count = Size;
- MVT::ValueType CVT = Count.getValueType();
+ MVT CVT = Count.getValueType();
SDOperand Left = DAG.getNode(ISD::AND, CVT, Count,
DAG.getConstant((AVT == MVT::i64) ? 7 : 3, CVT));
Chain = DAG.getCopyToReg(Chain, (CVT == MVT::i64) ? X86::RCX : X86::ECX,
} else if (BytesLeft) {
// Handle the last 1 - 7 bytes.
unsigned Offset = SizeVal - BytesLeft;
- MVT::ValueType AddrVT = Dst.getValueType();
- MVT::ValueType SizeVT = Size.getValueType();
+ MVT AddrVT = Dst.getValueType();
+ MVT SizeVT = Size.getValueType();
Chain = DAG.getMemset(Chain,
DAG.getNode(ISD::ADD, AddrVT, Dst,
if (!AlwaysInline && SizeVal > getSubtarget()->getMaxInlineSizeThreshold())
return SDOperand();
- MVT::ValueType AVT;
+ MVT AVT;
unsigned BytesLeft = 0;
if (Align >= 8 && Subtarget->is64Bit())
AVT = MVT::i64;
else
AVT = MVT::i8;
- unsigned UBytes = MVT::getSizeInBits(AVT) / 8;
+ unsigned UBytes = AVT.getSizeInBits() / 8;
unsigned CountVal = SizeVal / UBytes;
SDOperand Count = DAG.getIntPtrConstant(CountVal);
BytesLeft = SizeVal % UBytes;
if (BytesLeft) {
// Handle the last 1 - 7 bytes.
unsigned Offset = SizeVal - BytesLeft;
- MVT::ValueType DstVT = Dst.getValueType();
- MVT::ValueType SrcVT = Src.getValueType();
- MVT::ValueType SizeVT = Size.getValueType();
+ MVT DstVT = Dst.getValueType();
+ MVT SrcVT = Src.getValueType();
+ MVT SizeVT = Size.getValueType();
Results.push_back(DAG.getMemcpy(Chain,
DAG.getNode(ISD::ADD, DstVT, Dst,
DAG.getConstant(Offset, DstVT)),
assert(0 && "VAArgInst is not yet implemented for x86-64!");
abort();
+ return SDOperand();
}
SDOperand X86TargetLowering::LowerVACOPY(SDOperand Op, SelectionDAG &DAG) {
return SDOperand();
unsigned NewIntNo = 0;
- MVT::ValueType ShAmtVT = MVT::v4i32;
+ MVT ShAmtVT = MVT::v4i32;
switch (IntNo) {
case Intrinsic::x86_sse2_pslli_w:
NewIntNo = Intrinsic::x86_sse2_psll_w;
break;
}
}
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
ShAmt = DAG.getNode(ISD::BIT_CONVERT, VT,
DAG.getNode(ISD::SCALAR_TO_VECTOR, ShAmtVT, ShAmt));
return DAG.getNode(ISD::INTRINSIC_WO_CHAIN, VT,
const unsigned char JMP64r = TII->getBaseOpcodeFor(X86::JMP64r);
const unsigned char MOV64ri = TII->getBaseOpcodeFor(X86::MOV64ri);
- const unsigned char N86R10 =
- ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R10);
- const unsigned char N86R11 =
- ((const X86RegisterInfo*)RegInfo)->getX86RegNum(X86::R11);
+ const unsigned char N86R10 = RegInfo->getX86RegNum(X86::R10);
+ const unsigned char N86R11 = RegInfo->getX86RegNum(X86::R11);
const unsigned char REX_WB = 0x40 | 0x08 | 0x01; // REX prefix
Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
const unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
- const unsigned char N86Reg =
- ((const X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
+ const unsigned char N86Reg = RegInfo->getX86RegNum(NestReg);
OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
Trmp, TrmpAddr, 0);
const TargetMachine &TM = MF.getTarget();
const TargetFrameInfo &TFI = *TM.getFrameInfo();
unsigned StackAlignment = TFI.getStackAlignment();
- MVT::ValueType VT = Op.getValueType();
+ MVT VT = Op.getValueType();
// Save FP Control Word to stack slot
int SSFI = MF.getFrameInfo()->CreateStackObject(2, StackAlignment);
DAG.getConstant(3, MVT::i16));
- return DAG.getNode((MVT::getSizeInBits(VT) < 16 ?
+ return DAG.getNode((VT.getSizeInBits() < 16 ?
ISD::TRUNCATE : ISD::ZERO_EXTEND), VT, RetVal);
}
SDOperand X86TargetLowering::LowerCTLZ(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType OpVT = VT;
- unsigned NumBits = MVT::getSizeInBits(VT);
+ MVT VT = Op.getValueType();
+ MVT OpVT = VT;
+ unsigned NumBits = VT.getSizeInBits();
Op = Op.getOperand(0);
if (VT == MVT::i8) {
}
SDOperand X86TargetLowering::LowerCTTZ(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType VT = Op.getValueType();
- MVT::ValueType OpVT = VT;
- unsigned NumBits = MVT::getSizeInBits(VT);
+ MVT VT = Op.getValueType();
+ MVT OpVT = VT;
+ unsigned NumBits = VT.getSizeInBits();
Op = Op.getOperand(0);
if (VT == MVT::i8) {
}
SDOperand X86TargetLowering::LowerLCS(SDOperand Op, SelectionDAG &DAG) {
- MVT::ValueType T = cast<AtomicSDNode>(Op.Val)->getVT();
+ MVT T = cast<AtomicSDNode>(Op.Val)->getVT();
unsigned Reg = 0;
unsigned size = 0;
- switch(T) {
+ switch(T.getSimpleVT()) {
+ default:
+ assert(false && "Invalid value type!");
case MVT::i8: Reg = X86::AL; size = 1; break;
case MVT::i16: Reg = X86::AX; size = 2; break;
case MVT::i32: Reg = X86::EAX; size = 4; break;
}
SDNode* X86TargetLowering::ExpandATOMIC_LCS(SDNode* Op, SelectionDAG &DAG) {
- MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT();
+ MVT T = cast<AtomicSDNode>(Op)->getVT();
assert (T == MVT::i64 && "Only know how to expand i64 CAS");
SDOperand cpInL, cpInH;
cpInL = DAG.getNode(ISD::EXTRACT_ELEMENT, MVT::i32, Op->getOperand(3),
}
SDNode* X86TargetLowering::ExpandATOMIC_LSS(SDNode* Op, SelectionDAG &DAG) {
- MVT::ValueType T = cast<AtomicSDNode>(Op)->getVT();
+ MVT T = cast<AtomicSDNode>(Op)->getVT();
assert (T == MVT::i32 && "Only know how to expand i32 LSS");
SDOperand negOp = DAG.getNode(ISD::SUB, T,
DAG.getConstant(0, T), Op->getOperand(2));
case X86ISD::LCMPXCHG8_DAG: return "X86ISD::LCMPXCHG8_DAG";
case X86ISD::VZEXT_MOVL: return "X86ISD::VZEXT_MOVL";
case X86ISD::VZEXT_LOAD: return "X86ISD::VZEXT_LOAD";
+ case X86ISD::VSHL: return "X86ISD::VSHL";
+ case X86ISD::VSRL: return "X86ISD::VSRL";
}
}
return Subtarget->is64Bit() || NumBits1 < 64;
}
-bool X86TargetLowering::isTruncateFree(MVT::ValueType VT1,
- MVT::ValueType VT2) const {
- if (!MVT::isInteger(VT1) || !MVT::isInteger(VT2))
+bool X86TargetLowering::isTruncateFree(MVT VT1, MVT VT2) const {
+ if (!VT1.isInteger() || !VT2.isInteger())
return false;
- unsigned NumBits1 = MVT::getSizeInBits(VT1);
- unsigned NumBits2 = MVT::getSizeInBits(VT2);
+ unsigned NumBits1 = VT1.getSizeInBits();
+ unsigned NumBits2 = VT2.getSizeInBits();
if (NumBits1 <= NumBits2)
return false;
return Subtarget->is64Bit() || NumBits1 < 64;
/// isShuffleMaskLegal - Targets can use this to indicate that they only
/// support *some* VECTOR_SHUFFLE operations, those with specific masks.
/// By default, if a target supports the VECTOR_SHUFFLE node, all mask values
/// are assumed to be legal.
bool
-X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT::ValueType VT) const {
+X86TargetLowering::isShuffleMaskLegal(SDOperand Mask, MVT VT) const {
// Only do shuffles on 128-bit vector types for now.
- if (MVT::getSizeInBits(VT) == 64) return false;
+ if (VT.getSizeInBits() == 64) return false;
return (Mask.Val->getNumOperands() <= 4 ||
isIdentityMask(Mask.Val) ||
isIdentityMask(Mask.Val, true) ||
bool
X86TargetLowering::isVectorClearMaskLegal(const std::vector<SDOperand> &BVOps,
- MVT::ValueType EVT,
- SelectionDAG &DAG) const {
+ MVT EVT, SelectionDAG &DAG) const {
unsigned NumElts = BVOps.size();
// Only do shuffles on 128-bit vector types for now.
- if (MVT::getSizeInBits(EVT) * NumElts == 64) return false;
+ if (EVT.getSizeInBits() * NumElts == 64) return false;
if (NumElts == 2) return true;
if (NumElts == 4) {
return (isMOVLMask(&BVOps[0], 4) ||
X86TargetLowering::EmitAtomicBitwiseWithCustomInserter(MachineInstr *bInstr,
MachineBasicBlock *MBB,
unsigned regOpc,
- unsigned immOpc) {
+ unsigned immOpc,
+ bool invSrc) {
// For the atomic bitwise operator, we generate
// thisMBB:
// newMBB:
MachineInstrBuilder MIB = BuildMI(newMBB, TII->get(X86::MOV32rm), t1);
for (int i=0; i <= lastAddrIndx; ++i)
(*MIB).addOperand(*argOpers[i]);
-
+
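+  // When invSrc is set, complement the value loaded from memory before
+  // applying the bitwise operator; this is how ATOMNAND32 is implemented.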
+ unsigned tt = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
+ if (invSrc) {
+ MIB = BuildMI(newMBB, TII->get(X86::NOT32r), tt).addReg(t1);
+  } else
+ tt = t1;
+
unsigned t2 = F->getRegInfo().createVirtualRegister(X86::GR32RegisterClass);
assert( (argOpers[valArgIndx]->isReg() || argOpers[valArgIndx]->isImm())
&& "invalid operand");
MIB = BuildMI(newMBB, TII->get(regOpc), t2);
else
MIB = BuildMI(newMBB, TII->get(immOpc), t2);
- MIB.addReg(t1);
+ MIB.addReg(tt);
(*MIB).addOperand(*argOpers[valArgIndx]);
-
+
MIB = BuildMI(newMBB, TII->get(X86::MOV32rr), X86::EAX);
MIB.addReg(t1);
case X86::ATOMXOR32:
return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::XOR32rr,
X86::XOR32ri);
+ case X86::ATOMNAND32:
+ return EmitAtomicBitwiseWithCustomInserter(MI, BB, X86::AND32rr,
+ X86::AND32ri, true);
case X86::ATOMMIN32:
return EmitAtomicMinMaxWithCustomInserter(MI, BB, X86::CMOVL32rr);
case X86::ATOMMAX32:
}
}
-/// getShuffleScalarElt - Returns the scalar element that will make up the ith
-/// element of the result of the vector shuffle.
-static SDOperand getShuffleScalarElt(SDNode *N, unsigned i, SelectionDAG &DAG) {
- MVT::ValueType VT = N->getValueType(0);
- SDOperand PermMask = N->getOperand(2);
- unsigned NumElems = PermMask.getNumOperands();
- SDOperand V = (i < NumElems) ? N->getOperand(0) : N->getOperand(1);
- i %= NumElems;
- if (V.getOpcode() == ISD::SCALAR_TO_VECTOR) {
- return (i == 0)
- ? V.getOperand(0) : DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
- } else if (V.getOpcode() == ISD::VECTOR_SHUFFLE) {
- SDOperand Idx = PermMask.getOperand(i);
- if (Idx.getOpcode() == ISD::UNDEF)
- return DAG.getNode(ISD::UNDEF, MVT::getVectorElementType(VT));
- return getShuffleScalarElt(V.Val,cast<ConstantSDNode>(Idx)->getValue(),DAG);
- }
- return SDOperand();
-}
-
/// isGAPlusOffset - Returns true (and the GlobalValue and the offset) if the
-/// node is a GlobalAddress + an offset.
-static bool isGAPlusOffset(SDNode *N, GlobalValue* &GA, int64_t &Offset) {
- unsigned Opc = N->getOpcode();
- if (Opc == X86ISD::Wrapper) {
- if (dyn_cast<GlobalAddressSDNode>(N->getOperand(0))) {
+/// node is a GlobalAddress + offset.
+bool X86TargetLowering::isGAPlusOffset(SDNode *N,
+ GlobalValue* &GA, int64_t &Offset) const{
+ if (N->getOpcode() == X86ISD::Wrapper) {
+ if (isa<GlobalAddressSDNode>(N->getOperand(0))) {
GA = cast<GlobalAddressSDNode>(N->getOperand(0))->getGlobal();
return true;
}
- } else if (Opc == ISD::ADD) {
- SDOperand N1 = N->getOperand(0);
- SDOperand N2 = N->getOperand(1);
- if (isGAPlusOffset(N1.Val, GA, Offset)) {
- ConstantSDNode *V = dyn_cast<ConstantSDNode>(N2);
- if (V) {
- Offset += V->getSignExtended();
- return true;
- }
- } else if (isGAPlusOffset(N2.Val, GA, Offset)) {
- ConstantSDNode *V = dyn_cast<ConstantSDNode>(N1);
- if (V) {
- Offset += V->getSignExtended();
- return true;
- }
- }
- }
- return false;
-}
-
-/// isConsecutiveLoad - Returns true if N is loading from an address of Base
-/// + Dist * Size.
-static bool isConsecutiveLoad(SDNode *N, SDNode *Base, int Dist, int Size,
- MachineFrameInfo *MFI) {
- if (N->getOperand(0).Val != Base->getOperand(0).Val)
- return false;
-
- SDOperand Loc = N->getOperand(1);
- SDOperand BaseLoc = Base->getOperand(1);
- if (Loc.getOpcode() == ISD::FrameIndex) {
- if (BaseLoc.getOpcode() != ISD::FrameIndex)
- return false;
- int FI = cast<FrameIndexSDNode>(Loc)->getIndex();
- int BFI = cast<FrameIndexSDNode>(BaseLoc)->getIndex();
- int FS = MFI->getObjectSize(FI);
- int BFS = MFI->getObjectSize(BFI);
- if (FS != BFS || FS != Size) return false;
- return MFI->getObjectOffset(FI) == (MFI->getObjectOffset(BFI) + Dist*Size);
- } else {
- GlobalValue *GV1 = NULL;
- GlobalValue *GV2 = NULL;
- int64_t Offset1 = 0;
- int64_t Offset2 = 0;
- bool isGA1 = isGAPlusOffset(Loc.Val, GV1, Offset1);
- bool isGA2 = isGAPlusOffset(BaseLoc.Val, GV2, Offset2);
- if (isGA1 && isGA2 && GV1 == GV2)
- return Offset1 == (Offset2 + Dist*Size);
}
-
- return false;
+ return TargetLowering::isGAPlusOffset(N, GA, Offset);
}
-static bool isBaseAlignmentOfN(unsigned N, SDNode *Base, MachineFrameInfo *MFI,
- const X86Subtarget *Subtarget) {
+static bool isBaseAlignmentOfN(unsigned N, SDNode *Base,
+ const TargetLowering &TLI) {
GlobalValue *GV;
int64_t Offset = 0;
- if (isGAPlusOffset(Base, GV, Offset))
+ if (TLI.isGAPlusOffset(Base, GV, Offset))
return (GV->getAlignment() >= N && (Offset % N) == 0);
// DAG combine handles the stack object case.
return false;
}
static bool EltsFromConsecutiveLoads(SDNode *N, SDOperand PermMask,
- unsigned NumElems, MVT::ValueType EVT,
- MachineFrameInfo *MFI,
- SelectionDAG &DAG, SDNode *&Base) {
+ unsigned NumElems, MVT EVT,
+ SDNode *&Base,
+ SelectionDAG &DAG, MachineFrameInfo *MFI,
+ const TargetLowering &TLI) {
Base = NULL;
for (unsigned i = 0; i < NumElems; ++i) {
SDOperand Idx = PermMask.getOperand(i);
}
unsigned Index = cast<ConstantSDNode>(Idx)->getValue();
- SDOperand Elt = getShuffleScalarElt(N, Index, DAG);
+ SDOperand Elt = DAG.getShuffleScalarElt(N, Index);
if (!Elt.Val ||
(Elt.getOpcode() != ISD::UNDEF && !ISD::isNON_EXTLoad(Elt.Val)))
return false;
if (!Base) {
Base = Elt.Val;
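+ // An UNDEF first element leaves us no base load to compare the
+ // remaining elements against.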
+ if (Base->getOpcode() == ISD::UNDEF)
+ return false;
continue;
}
if (Elt.getOpcode() == ISD::UNDEF)
continue;
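+ // Element i must load from exactly i * (element size in bytes) past
+ // the base load for the shuffle to fold into one wide load.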
- if (!isConsecutiveLoad(Elt.Val, Base, i, MVT::getSizeInBits(EVT)/8,MFI))
+ if (!TLI.isConsecutiveLoad(Elt.Val, Base,
+ EVT.getSizeInBits()/8, i, MFI))
return false;
}
return true;
/// if the load addresses are consecutive, non-overlapping, and in the right
/// order.
static SDOperand PerformShuffleCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const TargetLowering &TLI) {
MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
- MVT::ValueType VT = N->getValueType(0);
- MVT::ValueType EVT = MVT::getVectorElementType(VT);
+ MVT VT = N->getValueType(0);
+ MVT EVT = VT.getVectorElementType();
SDOperand PermMask = N->getOperand(2);
unsigned NumElems = PermMask.getNumOperands();
SDNode *Base = NULL;
- if (!EltsFromConsecutiveLoads(N, PermMask, NumElems, EVT, MFI, DAG, Base))
+ if (!EltsFromConsecutiveLoads(N, PermMask, NumElems, EVT, Base,
+ DAG, MFI, TLI))
return SDOperand();
LoadSDNode *LD = cast<LoadSDNode>(Base);
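+ // A 16-byte aligned base lets the combined vector load use its natural
+ // alignment; otherwise preserve the original load's (smaller) alignment.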
- if (isBaseAlignmentOfN(16, Base->getOperand(1).Val, MFI, Subtarget))
+ if (isBaseAlignmentOfN(16, Base->getOperand(1).Val, TLI))
return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
LD->getSrcValueOffset(), LD->isVolatile());
return DAG.getLoad(VT, LD->getChain(), LD->getBasePtr(), LD->getSrcValue(),
LD->getAlignment());
}
-static SDNode *getBuildPairElt(SDNode *N, unsigned i) {
- SDOperand Elt = N->getOperand(i);
- if (Elt.getOpcode() != ISD::MERGE_VALUES)
- return Elt.Val;
- return Elt.getOperand(Elt.ResNo).Val;
-}
-
+/// PerformBuildVectorCombine - build_vector (load i64 / f64), 0 -> movq / movsd.
static SDOperand PerformBuildVectorCombine(SDNode *N, SelectionDAG &DAG,
- const X86Subtarget *Subtarget) {
+ const X86Subtarget *Subtarget,
+ const TargetLowering &TLI) {
+ unsigned NumOps = N->getNumOperands();
+
// Ignore single operand BUILD_VECTOR.
- if (N->getNumOperands() == 1)
+ if (NumOps == 1)
return SDOperand();
- MVT::ValueType VT = N->getValueType(0);
- MVT::ValueType EVT = MVT::getVectorElementType(VT);
+ MVT VT = N->getValueType(0);
+ MVT EVT = VT.getVectorElementType();
if ((EVT != MVT::i64 && EVT != MVT::f64) || Subtarget->is64Bit())
// We are looking for load i64 and zero extend. We want to transform
// it before legalizer has a chance to expand it. Also look for i64
return SDOperand();
// Value must be a load.
- MachineFrameInfo *MFI = DAG.getMachineFunction().getFrameInfo();
SDNode *Base = N->getOperand(0).Val;
if (!isa<LoadSDNode>(Base)) {
- if (Base->getOpcode() == ISD::BIT_CONVERT)
- Base = Base->getOperand(0).Val;
- if (Base->getOpcode() != ISD::BUILD_PAIR)
- return SDOperand();
- SDNode *Pair = Base;
- Base = getBuildPairElt(Pair, 0);
- if (!ISD::isNON_EXTLoad(Base))
+ if (Base->getOpcode() != ISD::BIT_CONVERT)
return SDOperand();
- SDNode *NextLD = getBuildPairElt(Pair, 1);
- if (!ISD::isNON_EXTLoad(NextLD) ||
- !isConsecutiveLoad(NextLD, Base, 1, 4/*32 bits*/, MFI))
+ Base = Base->getOperand(0).Val;
+ if (!isa<LoadSDNode>(Base))
return SDOperand();
}
- LoadSDNode *LD = cast<LoadSDNode>(Base);
// Transform it into VZEXT_LOAD addr.
+ LoadSDNode *LD = cast<LoadSDNode>(Base);
+
+ // Load must not be an extload.
+ if (LD->getExtensionType() != ISD::NON_EXTLOAD)
+ return SDOperand();
+
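+ // e.g. (v2i64 build_vector (i64 load addr), 0) becomes
+ // (v2i64 X86ISD::VZEXT_LOAD chain, addr), which selects to movq.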
return DAG.getNode(X86ISD::VZEXT_LOAD, VT, LD->getChain(), LD->getBasePtr());
}
// A preferable solution to the general problem is to figure out the right
// places to insert EMMS. This qualifies as a quick hack.
StoreSDNode *St = cast<StoreSDNode>(N);
- if (MVT::isVector(St->getValue().getValueType()) &&
- MVT::getSizeInBits(St->getValue().getValueType()) == 64 &&
+ if (St->getValue().getValueType().isVector() &&
+ St->getValue().getValueType().getSizeInBits() == 64 &&
isa<LoadSDNode>(St->getValue()) &&
!cast<LoadSDNode>(St->getValue())->isVolatile() &&
St->getChain().hasOneUse() && !St->isVolatile()) {
// Otherwise, lower to two 32-bit copies.
SDOperand LoAddr = Ld->getBasePtr();
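+ // x86 is little-endian: the high 32 bits live 4 bytes above the low 32.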
SDOperand HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
- DAG.getConstant(MVT::i32, 4));
+ DAG.getConstant(4, MVT::i32));
SDOperand LoLd = DAG.getLoad(MVT::i32, Ld->getChain(), LoAddr,
Ld->getSrcValue(), Ld->getSrcValueOffset(),
LoAddr = St->getBasePtr();
HiAddr = DAG.getNode(ISD::ADD, MVT::i32, LoAddr,
- DAG.getConstant(MVT::i32, 4));
+ DAG.getConstant(4, MVT::i32));
SDOperand LoSt = DAG.getStore(NewChain, LoLd, LoAddr,
St->getSrcValue(), St->getSrcValueOffset(),
SelectionDAG &DAG = DCI.DAG;
switch (N->getOpcode()) {
default: break;
- case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, Subtarget);
- case ISD::BUILD_VECTOR: return PerformBuildVectorCombine(N, DAG, Subtarget);
+ case ISD::VECTOR_SHUFFLE: return PerformShuffleCombine(N, DAG, *this);
+ case ISD::BUILD_VECTOR:
+ return PerformBuildVectorCombine(N, DAG, Subtarget, *this);
case ISD::SELECT: return PerformSELECTCombine(N, DAG, Subtarget);
case ISD::STORE: return PerformSTORECombine(N, DAG, Subtarget);
case X86ISD::FXOR:
/// with another that has more specific requirements based on the type of the
/// corresponding operand.
const char *X86TargetLowering::
-LowerXConstraint(MVT::ValueType ConstraintVT) const {
+LowerXConstraint(MVT ConstraintVT) const {
// FP X constraints get lowered to SSE1/2 registers if available, otherwise
// 'f' like normal targets.
- if (MVT::isFloatingPoint(ConstraintVT)) {
+ if (ConstraintVT.isFloatingPoint()) {
if (Subtarget->hasSSE2())
return "Y";
if (Subtarget->hasSSE1())
std::vector<unsigned> X86TargetLowering::
getRegClassForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+ MVT VT) const {
if (Constraint.size() == 1) {
// FIXME: not handling fp-stack yet!
switch (Constraint[0]) { // GCC X86 Constraint Letters
std::pair<unsigned, const TargetRegisterClass*>
X86TargetLowering::getRegForInlineAsmConstraint(const std::string &Constraint,
- MVT::ValueType VT) const {
+ MVT VT) const {
// First, see if this is a constraint that directly corresponds to an LLVM
// register class.
if (Constraint.size() == 1) {
// FALL THROUGH.
case 'x': // SSE_REGS if SSE1 allowed
if (!Subtarget->hasSSE1()) break;
-
- switch (VT) {
+
+ switch (VT.getSimpleVT()) {
default: break;
// Scalar SSE types.
case MVT::f32: