//===----------------------------------------------------------------------===//
#include "X86.h"
-#include "X86CodeEmitter.h"
#include "X86InstrBuilder.h"
#include "X86ISelLowering.h"
#include "X86MachineFunctionInfo.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/SSARegMap.h"
+#include "llvm/Support/CommandLine.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/ADT/StringExtras.h"
X86TargetLowering::X86TargetLowering(TargetMachine &TM)
: TargetLowering(TM) {
Subtarget = &TM.getSubtarget<X86Subtarget>();
- X86ScalarSSE = Subtarget->hasSSE2();
+ X86ScalarSSEf64 = Subtarget->hasSSE2();
+ X86ScalarSSEf32 = Subtarget->hasSSE1();
X86StackPtr = Subtarget->is64Bit() ? X86::RSP : X86::ESP;
RegInfo = TM.getRegisterInfo();
setOperationAction(ISD::UINT_TO_FP , MVT::i64 , Expand);
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Promote);
} else {
- if (X86ScalarSSE)
+ if (X86ScalarSSEf64)
// If SSE i64 SINT_TO_FP is not available, expand i32 UINT_TO_FP.
setOperationAction(ISD::UINT_TO_FP , MVT::i32 , Expand);
else
setOperationAction(ISD::SINT_TO_FP , MVT::i1 , Promote);
setOperationAction(ISD::SINT_TO_FP , MVT::i8 , Promote);
// SSE has no i16 to fp conversion, only i32
- if (X86ScalarSSE)
+ if (X86ScalarSSEf32) {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Promote);
- else {
+ // f32 and f64 cases are Legal, f80 case is not
+ setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
+ } else {
setOperationAction(ISD::SINT_TO_FP , MVT::i16 , Custom);
setOperationAction(ISD::SINT_TO_FP , MVT::i32 , Custom);
}
- if (!Subtarget->is64Bit()) {
- // Custom lower SINT_TO_FP and FP_TO_SINT from/to i64 in 32-bit mode.
- setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
- setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
- }
+ // In 32-bit mode these are custom lowered. In 64-bit mode f32 and f64
+ // are Legal, f80 is custom lowered.
+ setOperationAction(ISD::FP_TO_SINT , MVT::i64 , Custom);
+ setOperationAction(ISD::SINT_TO_FP , MVT::i64 , Custom);
// Promote i1/i8 FP_TO_SINT to larger FP_TO_SINTs, as X86 doesn't have
// this operation.
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
- if (X86ScalarSSE) {
+ if (X86ScalarSSEf32) {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
+ // f32 and f64 cases are Legal, f80 case is not
+ setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
} else {
setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Custom);
setOperationAction(ISD::FP_TO_SINT , MVT::i32 , Custom);
setOperationAction(ISD::FP_TO_UINT , MVT::i64 , Expand);
setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
} else {
- if (X86ScalarSSE && !Subtarget->hasSSE3())
+ if (X86ScalarSSEf32 && !Subtarget->hasSSE3())
// Expand FP_TO_UINT into a select.
// FIXME: We would like to use a Custom expander here eventually to do
// the optimal thing for SSE vs. the default expansion in the legalizer.
}
// TODO: when we have SSE, these could be more efficient by using movd/movq.
- if (!X86ScalarSSE) {
+ if (!X86ScalarSSEf64) {
setOperationAction(ISD::BIT_CONVERT , MVT::f32 , Expand);
setOperationAction(ISD::BIT_CONVERT , MVT::i32 , Expand);
}
+ // Divide and remainder are lowered to use div or idiv in legalize in
+ // order to expose the intermediate computations to trivial CSE. This is
+ // most noticeable when both x/y and x%y are being computed; they can be
+ // done with a single div or idiv.
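+ // (x86 div and idiv produce both results at once: the quotient in
+ // AL/AX/EAX/RAX and the remainder in AH/DX/EDX/RDX.)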
+ setOperationAction(ISD::SDIV , MVT::i8 , Custom);
+ setOperationAction(ISD::UDIV , MVT::i8 , Custom);
+ setOperationAction(ISD::SREM , MVT::i8 , Custom);
+ setOperationAction(ISD::UREM , MVT::i8 , Custom);
+ setOperationAction(ISD::SDIV , MVT::i16 , Custom);
+ setOperationAction(ISD::UDIV , MVT::i16 , Custom);
+ setOperationAction(ISD::SREM , MVT::i16 , Custom);
+ setOperationAction(ISD::UREM , MVT::i16 , Custom);
+ setOperationAction(ISD::SDIV , MVT::i32 , Custom);
+ setOperationAction(ISD::UDIV , MVT::i32 , Custom);
+ setOperationAction(ISD::SREM , MVT::i32 , Custom);
+ setOperationAction(ISD::UREM , MVT::i32 , Custom);
+ setOperationAction(ISD::SDIV , MVT::i64 , Custom);
+ setOperationAction(ISD::UDIV , MVT::i64 , Custom);
+ setOperationAction(ISD::SREM , MVT::i64 , Custom);
+ setOperationAction(ISD::UREM , MVT::i64 , Custom);
+
setOperationAction(ISD::BR_JT , MVT::Other, Expand);
setOperationAction(ISD::BRCOND , MVT::Other, Custom);
setOperationAction(ISD::BR_CC , MVT::Other, Expand);
setOperationAction(ISD::SELECT , MVT::i32 , Custom);
setOperationAction(ISD::SELECT , MVT::f32 , Custom);
setOperationAction(ISD::SELECT , MVT::f64 , Custom);
+ setOperationAction(ISD::SELECT , MVT::f80 , Custom);
setOperationAction(ISD::SETCC , MVT::i8 , Custom);
setOperationAction(ISD::SETCC , MVT::i16 , Custom);
setOperationAction(ISD::SETCC , MVT::i32 , Custom);
setOperationAction(ISD::SETCC , MVT::f32 , Custom);
setOperationAction(ISD::SETCC , MVT::f64 , Custom);
+ setOperationAction(ISD::SETCC , MVT::f80 , Custom);
if (Subtarget->is64Bit()) {
setOperationAction(ISD::SELECT , MVT::i64 , Custom);
setOperationAction(ISD::SETCC , MVT::i64 , Custom);
setOperationAction(ISD::MEMSET , MVT::Other, Custom);
setOperationAction(ISD::MEMCPY , MVT::Other, Custom);
- // We don't have line number support yet.
+ // Use the default ISD::LOCATION expansion.
setOperationAction(ISD::LOCATION, MVT::Other, Expand);
- setOperationAction(ISD::DEBUG_LOC, MVT::Other, Expand);
// FIXME - use subtarget debug flags
if (!Subtarget->isTargetDarwin() &&
!Subtarget->isTargetELF() &&
setExceptionPointerRegister(X86::EAX);
setExceptionSelectorRegister(X86::EDX);
}
+ setOperationAction(ISD::FRAME_TO_ARGS_OFFSET, MVT::i32, Custom);
- setOperationAction(ISD::ADJUST_TRAMP, MVT::i32, Expand);
- setOperationAction(ISD::ADJUST_TRAMP, MVT::i64, Expand);
- setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
+ setOperationAction(ISD::TRAMPOLINE, MVT::Other, Custom);
// VASTART needs to be custom lowered to use the VarArgsFrameIndex
setOperationAction(ISD::VASTART , MVT::Other, Custom);
else
setOperationAction(ISD::DYNAMIC_STACKALLOC, MVT::i32, Expand);
- if (X86ScalarSSE) {
+ if (X86ScalarSSEf64) {
+ // f32 and f64 use SSE.
// Set up the FP register classes.
addRegisterClass(MVT::f32, X86::FR32RegisterClass);
addRegisterClass(MVT::f64, X86::FR64RegisterClass);
// cases we handle.
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
- addLegalFPImmediate(+0.0); // xorps / xorpd
+ addLegalFPImmediate(APFloat(+0.0)); // xorpd
+ addLegalFPImmediate(APFloat(+0.0f)); // xorps
// Conversions to long double (in X87) go through memory.
setConvertAction(MVT::f32, MVT::f80, Expand);
// Conversions from long double (in X87) go through memory.
setConvertAction(MVT::f80, MVT::f32, Expand);
setConvertAction(MVT::f80, MVT::f64, Expand);
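+ // (There is no direct register move between x87 and SSE, so these
+ // conversions must go through a stack slot.)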
+ } else if (X86ScalarSSEf32) {
+ // Use SSE for f32, x87 for f64.
+ // Set up the FP register classes.
+ addRegisterClass(MVT::f32, X86::FR32RegisterClass);
+ addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
+
+ // Use ANDPS to simulate FABS.
+ setOperationAction(ISD::FABS , MVT::f32, Custom);
+
+ // Use XORP to simulate FNEG.
+ setOperationAction(ISD::FNEG , MVT::f32, Custom);
+
+ setOperationAction(ISD::UNDEF, MVT::f64, Expand);
+
+ // Use ANDPS and ORPS to simulate FCOPYSIGN.
+ setOperationAction(ISD::FCOPYSIGN, MVT::f64, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f32, Custom);
+
+ // We don't support sin/cos/fmod
+ setOperationAction(ISD::FSIN , MVT::f32, Expand);
+ setOperationAction(ISD::FCOS , MVT::f32, Expand);
+ setOperationAction(ISD::FREM , MVT::f32, Expand);
+
+ // Expand FP immediates into loads from the stack, except for the special
+ // cases we handle.
+ setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
+ setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
+ addLegalFPImmediate(APFloat(+0.0f)); // xorps
+ addLegalFPImmediate(APFloat(+0.0)); // FLD0
+ addLegalFPImmediate(APFloat(+1.0)); // FLD1
+ addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
+ addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
+
+ // SSE->x87 conversions go through memory.
+ setConvertAction(MVT::f32, MVT::f64, Expand);
+ setConvertAction(MVT::f32, MVT::f80, Expand);
+
+ // x87->SSE truncations need to go through memory.
+ setConvertAction(MVT::f80, MVT::f32, Expand);
+ setConvertAction(MVT::f64, MVT::f32, Expand);
+ // And x87->x87 truncations also.
+ setConvertAction(MVT::f80, MVT::f64, Expand);
+
+ if (!UnsafeFPMath) {
+ setOperationAction(ISD::FSIN , MVT::f64 , Expand);
+ setOperationAction(ISD::FCOS , MVT::f64 , Expand);
+ }
} else {
+ // f32 and f64 in x87.
// Set up the FP register classes.
addRegisterClass(MVT::f64, X86::RFP64RegisterClass);
addRegisterClass(MVT::f32, X86::RFP32RegisterClass);
setOperationAction(ISD::ConstantFP, MVT::f64, Expand);
setOperationAction(ISD::ConstantFP, MVT::f32, Expand);
- addLegalFPImmediate(+0.0); // FLD0
- addLegalFPImmediate(+1.0); // FLD1
- addLegalFPImmediate(-0.0); // FLD0/FCHS
- addLegalFPImmediate(-1.0); // FLD1/FCHS
+ addLegalFPImmediate(APFloat(+0.0)); // FLD0
+ addLegalFPImmediate(APFloat(+1.0)); // FLD1
+ addLegalFPImmediate(APFloat(-0.0)); // FLD0/FCHS
+ addLegalFPImmediate(APFloat(-1.0)); // FLD1/FCHS
+ addLegalFPImmediate(APFloat(+0.0f)); // FLD0
+ addLegalFPImmediate(APFloat(+1.0f)); // FLD1
+ addLegalFPImmediate(APFloat(-0.0f)); // FLD0/FCHS
+ addLegalFPImmediate(APFloat(-1.0f)); // FLD1/FCHS
}
// Long double always uses X87.
addRegisterClass(MVT::f80, X86::RFP80RegisterClass);
+ setOperationAction(ISD::UNDEF, MVT::f80, Expand);
+ setOperationAction(ISD::FCOPYSIGN, MVT::f80, Expand);
+ setOperationAction(ISD::ConstantFP, MVT::f80, Expand);
+ if (!UnsafeFPMath) {
+ setOperationAction(ISD::FSIN , MVT::f80 , Expand);
+ setOperationAction(ISD::FCOS , MVT::f80 , Expand);
+ }
// First set operation action for all vector types to expand. Then we
// will selectively turn on ones that can be effectively codegen'd.
// If this is an FP return with ScalarSSE, we need to move the value from
// an XMM register onto the fp-stack.
- if (X86ScalarSSE) {
+ if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
+ (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
SDOperand MemLoc;
// If this is a load into a scalarsse value, don't store the loaded value
// If we are using ScalarSSE, store ST(0) to the stack and reload it into
// an XMM register.
- if (X86ScalarSSE) {
+ if ((X86ScalarSSEf32 && RVLocs[0].getValVT() == MVT::f32) ||
+ (X86ScalarSSEf64 && RVLocs[0].getValVT() == MVT::f64)) {
// FIXME: Currently the FST is flagged to the FP_GET_RESULT. This
// shouldn't be necessary except that RFP cannot be live across
// multiple blocks. When stackifier is fixed, they can be uncoupled.
return VReg;
}
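+
+// LowerMemArgument - Lower an incoming argument that was assigned to a
+// stack slot. ByVal arguments are used in place via their frame index;
+// everything else is loaded from the slot. Shared by the argument-lowering
+// paths below.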
+SDOperand X86TargetLowering::LowerMemArgument(SDOperand Op, SelectionDAG &DAG,
+ const CCValAssign &VA,
+ MachineFrameInfo *MFI,
+ SDOperand Root, unsigned i) {
+ // Create the nodes corresponding to a load from this parameter slot.
+ int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
+ VA.getLocMemOffset());
+ SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
+
+ unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
+
+ if (Flags & ISD::ParamFlags::ByVal)
+ return FIN;
+
+ return DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0);
+}
+
SDOperand X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG,
bool isStdCall) {
unsigned NumArgs = Op.Val->getNumValues() - 1;
ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
-
- // Create the nodes corresponding to a load from this parameter slot.
- int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
- VA.getLocMemOffset());
- SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
+ ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
}
}
assert(VA.isMemLoc());
if (StackPtr.Val == 0)
StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
- SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+
+ MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
+ Arg));
}
}
ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
-
- // Create the nodes corresponding to a load from this parameter slot.
- int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
- VA.getLocMemOffset());
- SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
- ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
+ ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
}
}
&ArgValues[0], ArgValues.size()).getValue(Op.ResNo);
}
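+
+// LowerMemOpCallTo - Emit the store of one outgoing argument to its stack
+// slot, or an ISD::MEMCPY from the caller's copy for ByVal arguments.
+// Shared by the call-lowering paths below.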
+SDOperand
+X86TargetLowering::LowerMemOpCallTo(SDOperand Op, SelectionDAG &DAG,
+ const SDOperand &StackPtr,
+ const CCValAssign &VA,
+ SDOperand Chain,
+ SDOperand Arg) {
+ SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
+ PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
+ SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
+ unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
+ if (Flags & ISD::ParamFlags::ByVal) {
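+ // ByValAlign holds the log2 of the alignment; ByValSize holds the size
+ // in bytes. Both are bitfields within the parameter flags word.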
+ unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
+ ISD::ParamFlags::ByValAlignOffs);
+
+ unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
+ ISD::ParamFlags::ByValSizeOffs;
+
+ SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
+ SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
+
+ return DAG.getNode(ISD::MEMCPY, MVT::Other, Chain, PtrOff, Arg, SizeNode,
+ AlignNode);
+ }
+
+ return DAG.getStore(Chain, Arg, PtrOff, NULL, 0);
+}
+
SDOperand X86TargetLowering::LowerFastCCCallTo(SDOperand Op, SelectionDAG &DAG,
unsigned CC) {
SDOperand Chain = Op.getOperand(0);
assert(VA.isMemLoc());
if (StackPtr.Val == 0)
StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
- SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
+
+ MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
+ Arg));
}
}
ArgValues.push_back(ArgValue);
} else {
assert(VA.isMemLoc());
-
- // Create the nodes corresponding to a load from this parameter slot.
- int FI = MFI->CreateFixedObject(MVT::getSizeInBits(VA.getValVT())/8,
- VA.getLocMemOffset());
- SDOperand FIN = DAG.getFrameIndex(FI, getPointerTy());
-
- unsigned Flags = cast<ConstantSDNode>(Op.getOperand(3 + i))->getValue();
- if (Flags & ISD::ParamFlags::ByVal)
- ArgValues.push_back(FIN);
- else
- ArgValues.push_back(DAG.getLoad(VA.getValVT(), Root, FIN, NULL, 0));
+ ArgValues.push_back(LowerMemArgument(Op, DAG, VA, MFI, Root, i));
}
}
assert(VA.isMemLoc());
if (StackPtr.Val == 0)
StackPtr = DAG.getRegister(getStackPtrReg(), getPointerTy());
- SDOperand PtrOff = DAG.getConstant(VA.getLocMemOffset(), getPointerTy());
- PtrOff = DAG.getNode(ISD::ADD, getPointerTy(), StackPtr, PtrOff);
-
- SDOperand FlagsOp = Op.getOperand(6+2*VA.getValNo());
- unsigned Flags = cast<ConstantSDNode>(FlagsOp)->getValue();
- if (Flags & ISD::ParamFlags::ByVal) {
- unsigned Align = 1 << ((Flags & ISD::ParamFlags::ByValAlign) >>
- ISD::ParamFlags::ByValAlignOffs);
- unsigned Size = (Flags & ISD::ParamFlags::ByValSize) >>
- ISD::ParamFlags::ByValSizeOffs;
- SDOperand AlignNode = DAG.getConstant(Align, MVT::i32);
- SDOperand SizeNode = DAG.getConstant(Size, MVT::i32);
-
- assert(0 && "Not Implemented");
-
- SDOperand Copy = DAG.getNode(ISD::MEMCPY, MVT::Other, Chain, PtrOff,
- Arg, SizeNode, AlignNode);
- MemOpChains.push_back(Copy);
- }
- else {
- MemOpChains.push_back(DAG.getStore(Chain, Arg, PtrOff, NULL, 0));
- }
+ MemOpChains.push_back(LowerMemOpCallTo(Op, DAG, StackPtr, VA, Chain,
+ Arg));
}
}
// X < 0 -> X == 0, jump on sign.
X86CC = X86::COND_S;
return true;
+ } else if (SetCCOpcode == ISD::SETLT && RHSC->getValue() == 1) {
+ // X < 1 -> X <= 0
+ RHS = DAG.getConstant(0, RHS.getValueType());
+ X86CC = X86::COND_LE;
+ return true;
}
}
return ((isa<ConstantSDNode>(Elt) &&
cast<ConstantSDNode>(Elt)->getValue() == 0) ||
(isa<ConstantFPSDNode>(Elt) &&
- cast<ConstantFPSDNode>(Elt)->isExactlyValue(0.0)));
+ cast<ConstantFPSDNode>(Elt)->getValueAPF().isPosZero()));
}
/// isZeroShuffle - Returns true if N is a VECTOR_SHUFFLE that can be resolved
SDOperand AndNode = DAG.getNode(ISD::AND, MVT::i8, ShAmt,
DAG.getConstant(32, MVT::i8));
SDOperand COps[]={DAG.getEntryNode(), AndNode, DAG.getConstant(0, MVT::i8)};
- SDOperand InFlag = DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
+ SDOperand Cond = NewCCModeling
+ ? DAG.getNode(X86ISD::CMP_NEW, MVT::i32,
+ AndNode, DAG.getConstant(0, MVT::i8))
+ : DAG.getNode(X86ISD::CMP, VTs, 2, COps, 3).getValue(1);
SDOperand Hi, Lo;
SDOperand CC = DAG.getConstant(X86::COND_NE, MVT::i8);
-
+ unsigned Opc = NewCCModeling ? X86ISD::CMOV_NEW : X86ISD::CMOV;
VTs = DAG.getNodeValueTypes(MVT::i32, MVT::Flag);
SmallVector<SDOperand, 4> Ops;
if (Op.getOpcode() == ISD::SHL_PARTS) {
Ops.push_back(Tmp2);
Ops.push_back(Tmp3);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
- InFlag = Hi.getValue(1);
+ Ops.push_back(Cond);
+ if (NewCCModeling)
+ Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
+ else {
+ Hi = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size());
+ Cond = Hi.getValue(1);
+ }
Ops.clear();
Ops.push_back(Tmp3);
Ops.push_back(Tmp1);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
+ Ops.push_back(Cond);
+ if (NewCCModeling)
+ Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
+ else
+ Lo = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size());
} else {
Ops.push_back(Tmp2);
Ops.push_back(Tmp3);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Lo = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
- InFlag = Lo.getValue(1);
+ Ops.push_back(Cond);
+ if (NewCCModeling)
+ Lo = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
+ else {
+ Lo = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size());
+ Cond = Lo.getValue(1);
+ }
Ops.clear();
Ops.push_back(Tmp3);
Ops.push_back(Tmp1);
Ops.push_back(CC);
- Ops.push_back(InFlag);
- Hi = DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
+ Ops.push_back(Cond);
+ if (NewCCModeling)
+ Hi = DAG.getNode(Opc, MVT::i32, &Ops[0], Ops.size());
+ else
+ Hi = DAG.getNode(Opc, VTs, 2, &Ops[0], Ops.size());
}
VTs = DAG.getNodeValueTypes(MVT::i32, MVT::i32);
return DAG.getNode(ISD::MERGE_VALUES, VTs, 2, &Ops[0], Ops.size());
}
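+
+// LowerIntegerDivOrRem - Lower [SU]DIV and [SU]REM into a two-result
+// X86ISD::DIV or X86ISD::IDIV node: result 0 is the quotient and result 1
+// is the remainder, so x/y and x%y share a single node.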
+SDOperand X86TargetLowering::LowerIntegerDivOrRem(SDOperand Op,
+ SelectionDAG &DAG) {
+ unsigned Opcode = Op.getOpcode();
+ MVT::ValueType NVT = Op.getValueType();
+ bool isSigned = Opcode == ISD::SDIV || Opcode == ISD::SREM;
+ bool isDiv = Opcode == ISD::SDIV || Opcode == ISD::UDIV;
+ unsigned Opc = isSigned ? X86ISD::IDIV : X86ISD::DIV;
+
+ SDOperand Ops[] = { Op.getOperand(0), Op.getOperand(1) };
+ SDOperand DR = DAG.getNode(Opc, DAG.getVTList(NVT, NVT), Ops, 2);
+
+ if (isDiv)
+ return DR;
+
+ return SDOperand(DR.Val, 1);
+}
+
SDOperand X86TargetLowering::LowerSINT_TO_FP(SDOperand Op, SelectionDAG &DAG) {
assert(Op.getOperand(0).getValueType() <= MVT::i64 &&
Op.getOperand(0).getValueType() >= MVT::i16 &&
SDOperand Chain = DAG.getStore(DAG.getEntryNode(), Op.getOperand(0),
StackSlot, NULL, 0);
+ // These are really Legal; returning a null SDOperand lets the caller
+ // fall through to the Legal case.
+ if (SrcVT == MVT::i32 && Op.getValueType() == MVT::f32 && X86ScalarSSEf32)
+ return Result;
+ if (SrcVT == MVT::i32 && Op.getValueType() == MVT::f64 && X86ScalarSSEf64)
+ return Result;
+ if (SrcVT == MVT::i64 && Op.getValueType() != MVT::f80 &&
+ Subtarget->is64Bit())
+ return Result;
+
// Build the FILD
SDVTList Tys;
- if (X86ScalarSSE)
+ bool useSSE = (X86ScalarSSEf32 && Op.getValueType() == MVT::f32) ||
+ (X86ScalarSSEf64 && Op.getValueType() == MVT::f64);
+ if (useSSE)
Tys = DAG.getVTList(MVT::f64, MVT::Other, MVT::Flag);
else
Tys = DAG.getVTList(Op.getValueType(), MVT::Other);
Ops.push_back(Chain);
Ops.push_back(StackSlot);
Ops.push_back(DAG.getValueType(SrcVT));
- Result = DAG.getNode(X86ScalarSSE ? X86ISD::FILD_FLAG :X86ISD::FILD,
+ Result = DAG.getNode(useSSE ? X86ISD::FILD_FLAG : X86ISD::FILD,
Tys, &Ops[0], Ops.size());
- if (X86ScalarSSE) {
+ if (useSSE) {
Chain = Result.getValue(1);
SDOperand InFlag = Result.getValue(2);
"Unknown FP_TO_SINT to lower!");
// We lower FP->sint64 into FISTP64, followed by a load, all to a temporary
// stack slot.
+ SDOperand Result;
MachineFunction &MF = DAG.getMachineFunction();
unsigned MemSize = MVT::getSizeInBits(Op.getValueType())/8;
int SSFI = MF.getFrameInfo()->CreateStackObject(MemSize, MemSize);
SDOperand StackSlot = DAG.getFrameIndex(SSFI, getPointerTy());
+ // These are really Legal; returning the still-null Result lets the
+ // caller fall through to the Legal case.
+ if (Op.getValueType() == MVT::i32 &&
+ X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32)
+ return Result;
+ if (Op.getValueType() == MVT::i32 &&
+ X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)
+ return Result;
+ if (Subtarget->is64Bit() &&
+ Op.getValueType() == MVT::i64 &&
+ Op.getOperand(0).getValueType() != MVT::f80)
+ return Result;
+
unsigned Opc;
switch (Op.getValueType()) {
default: assert(0 && "Invalid FP_TO_SINT to lower!");
SDOperand Chain = DAG.getEntryNode();
SDOperand Value = Op.getOperand(0);
- if (X86ScalarSSE) {
+ if ((X86ScalarSSEf32 && Op.getOperand(0).getValueType() == MVT::f32) ||
+ (X86ScalarSSEf64 && Op.getOperand(0).getValueType() == MVT::f64)) {
assert(Op.getValueType() == MVT::i64 && "Invalid FP_TO_SINT to lower!");
Chain = DAG.getStore(Chain, Value, StackSlot, NULL, 0);
SDVTList Tys = DAG.getVTList(Op.getOperand(0).getValueType(), MVT::Other);
const Type *OpNTy = MVT::getTypeForValueType(EltVT);
std::vector<Constant*> CV;
if (EltVT == MVT::f64) {
- Constant *C = ConstantFP::get(OpNTy, BitsToDouble(~(1ULL << 63)));
+ Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, ~(1ULL << 63))));
CV.push_back(C);
CV.push_back(C);
} else {
- Constant *C = ConstantFP::get(OpNTy, BitsToFloat(~(1U << 31)));
+ Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, ~(1U << 31))));
CV.push_back(C);
CV.push_back(C);
CV.push_back(C);
const Type *OpNTy = MVT::getTypeForValueType(EltVT);
std::vector<Constant*> CV;
if (EltVT == MVT::f64) {
- Constant *C = ConstantFP::get(OpNTy, BitsToDouble(1ULL << 63));
+ Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(64, 1ULL << 63)));
CV.push_back(C);
CV.push_back(C);
} else {
- Constant *C = ConstantFP::get(OpNTy, BitsToFloat(1U << 31));
+ Constant *C = ConstantFP::get(OpNTy, APFloat(APInt(32, 1U << 31)));
CV.push_back(C);
CV.push_back(C);
CV.push_back(C);
if (MVT::getSizeInBits(SrcVT) < MVT::getSizeInBits(VT)) {
Op1 = DAG.getNode(ISD::FP_EXTEND, VT, Op1);
SrcVT = VT;
+ SrcTy = MVT::getTypeForValueType(SrcVT);
}
// First get the sign bit of second operand.
std::vector<Constant*> CV;
if (SrcVT == MVT::f64) {
- CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(1ULL << 63)));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 1ULL << 63))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
} else {
- CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(1U << 31)));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 1U << 31))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
}
Constant *C = ConstantVector::get(CV);
SDOperand CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
// Clear first operand sign bit.
CV.clear();
if (VT == MVT::f64) {
- CV.push_back(ConstantFP::get(SrcTy, BitsToDouble(~(1ULL << 63))));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, ~(1ULL << 63)))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(64, 0))));
} else {
- CV.push_back(ConstantFP::get(SrcTy, BitsToFloat(~(1U << 31))));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
- CV.push_back(ConstantFP::get(SrcTy, 0.0));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, ~(1U << 31)))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
+ CV.push_back(ConstantFP::get(SrcTy, APFloat(APInt(32, 0))));
}
C = ConstantVector::get(CV);
CPIdx = DAG.getConstantPool(C, getPointerTy(), 4);
}
}
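+
+// LowerSETCC_New - Lower SETCC under the new condition-code modeling: emit
+// an explicit X86ISD::CMP_NEW of the operands and materialize the result
+// with X86ISD::SETCC_NEW.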
+SDOperand X86TargetLowering::LowerSETCC_New(SDOperand Op, SelectionDAG &DAG) {
+ assert(Op.getValueType() == MVT::i8 && "SetCC type must be 8-bit integer");
+ SDOperand Cond;
+ SDOperand Op0 = Op.getOperand(0);
+ SDOperand Op1 = Op.getOperand(1);
+ SDOperand CC = Op.getOperand(2);
+ ISD::CondCode SetCCOpcode = cast<CondCodeSDNode>(CC)->get();
+ bool isFP = MVT::isFloatingPoint(Op1.getValueType());
+ unsigned X86CC;
+
+ if (translateX86CC(SetCCOpcode, isFP, X86CC, Op0, Op1, DAG)) {
+ Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Op0, Op1);
+ return DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), Cond);
+ }
+
+ assert(isFP && "Illegal integer SetCC!");
+
+ Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Op0, Op1);
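+ // No single x86 condition code tests SETOEQ or SETUNE, because an
+ // unordered compare sets PF; combine two SETCC_NEW nodes instead.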
+ switch (SetCCOpcode) {
+ default: assert(false && "Illegal floating point SetCC!");
+ case ISD::SETOEQ: { // !PF & ZF
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86::COND_NP, MVT::i8), Cond);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86::COND_E, MVT::i8), Cond);
+ return DAG.getNode(ISD::AND, MVT::i8, Tmp1, Tmp2);
+ }
+ case ISD::SETUNE: { // PF | !ZF
+ SDOperand Tmp1 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86::COND_P, MVT::i8), Cond);
+ SDOperand Tmp2 = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86::COND_NE, MVT::i8), Cond);
+ return DAG.getNode(ISD::OR, MVT::i8, Tmp1, Tmp2);
+ }
+ }
+}
+
+
SDOperand X86TargetLowering::LowerSELECT(SDOperand Op, SelectionDAG &DAG) {
bool addTest = true;
SDOperand Chain = DAG.getEntryNode();
// pressure reason)?
SDOperand Cmp = Cond.getOperand(1);
unsigned Opc = Cmp.getOpcode();
- bool IllegalFPCMov = !X86ScalarSSE &&
- MVT::isFloatingPoint(Op.getValueType()) &&
+ bool IllegalFPCMov =
+ !((X86ScalarSSEf32 && Op.getValueType() == MVT::f32) ||
+ (X86ScalarSSEf64 && Op.getValueType() == MVT::f64)) &&
!hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
if ((Opc == X86ISD::CMP || Opc == X86ISD::COMI || Opc == X86ISD::UCOMI) &&
!IllegalFPCMov) {
return DAG.getNode(X86ISD::CMOV, VTs, 2, &Ops[0], Ops.size());
}
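+
+// LowerSELECT_New - Lower SELECT to X86ISD::CMOV_NEW under the new
+// condition-code modeling. The flag-setting CMP_NEW is re-emitted rather
+// than shared, since a flag operand cannot be shared between users.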
+SDOperand X86TargetLowering::LowerSELECT_New(SDOperand Op, SelectionDAG &DAG) {
+ bool addTest = true;
+ SDOperand Cond = Op.getOperand(0);
+ SDOperand CC;
+
+ if (Cond.getOpcode() == ISD::SETCC)
+ Cond = LowerSETCC_New(Cond, DAG);
+
+ if (Cond.getOpcode() == X86ISD::SETCC_NEW) {
+ CC = Cond.getOperand(0);
+
+ // If condition flag is set by a X86ISD::CMP, then make a copy of it
+ // (since flag operand cannot be shared). Use it as the condition setting
+ // operand in place of the X86ISD::SETCC.
+ // If the X86ISD::SETCC has more than one use, then perhaps it's better
+ // to use a test instead of duplicating the X86ISD::CMP (for register
+ // pressure reason)?
+ SDOperand Cmp = Cond.getOperand(1);
+ unsigned Opc = Cmp.getOpcode();
+ bool IllegalFPCMov =
+ !((X86ScalarSSEf32 && Op.getValueType() == MVT::f32) ||
+ (X86ScalarSSEf64 && Op.getValueType() == MVT::f64)) &&
+ !hasFPCMov(cast<ConstantSDNode>(CC)->getSignExtended());
+ if ((Opc == X86ISD::CMP_NEW ||
+ Opc == X86ISD::COMI_NEW ||
+ Opc == X86ISD::UCOMI_NEW) &&
+ !IllegalFPCMov) {
+ Cond = DAG.getNode(Opc, MVT::i32, Cmp.getOperand(0), Cmp.getOperand(1));
+ addTest = false;
+ }
+ }
+
+ if (addTest) {
+ CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Cond,
+ DAG.getConstant(0, MVT::i8));
+ }
+
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(Op.getValueType(),
+ MVT::Flag);
+ SmallVector<SDOperand, 4> Ops;
+ // X86ISD::CMOV means set the result (which is operand 1) to the RHS if
+ // condition is true.
+ Ops.push_back(Op.getOperand(2));
+ Ops.push_back(Op.getOperand(1));
+ Ops.push_back(CC);
+ Ops.push_back(Cond);
+ return DAG.getNode(X86ISD::CMOV_NEW, VTs, 2, &Ops[0], Ops.size());
+}
+
SDOperand X86TargetLowering::LowerBRCOND(SDOperand Op, SelectionDAG &DAG) {
bool addTest = true;
SDOperand Chain = Op.getOperand(0);
Cond, Op.getOperand(2), CC, Cond.getValue(1));
}
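+
+// LowerBRCOND_New - Lower BRCOND to X86ISD::BRCOND_NEW under the new
+// condition-code modeling, testing the condition against zero with CMP_NEW
+// when it is not already an x86 flag-producing node.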
+SDOperand X86TargetLowering::LowerBRCOND_New(SDOperand Op, SelectionDAG &DAG) {
+ bool addTest = true;
+ SDOperand Chain = Op.getOperand(0);
+ SDOperand Cond = Op.getOperand(1);
+ SDOperand Dest = Op.getOperand(2);
+ SDOperand CC;
+
+ if (Cond.getOpcode() == ISD::SETCC)
+ Cond = LowerSETCC_New(Cond, DAG);
+
+ if (Cond.getOpcode() == X86ISD::SETCC_NEW) {
+ CC = Cond.getOperand(0);
+
+ // If condition flag is set by a X86ISD::CMP, then make a copy of it
+ // (since flag operand cannot be shared). Use it as the condition setting
+ // operand in place of the X86ISD::SETCC.
+ // If the X86ISD::SETCC has more than one use, then perhaps it's better
+ // to use a test instead of duplicating the X86ISD::CMP (for register
+ // pressure reason)?
+ SDOperand Cmp = Cond.getOperand(1);
+ unsigned Opc = Cmp.getOpcode();
+ if (Opc == X86ISD::CMP_NEW ||
+ Opc == X86ISD::COMI_NEW ||
+ Opc == X86ISD::UCOMI_NEW) {
+ Cond = DAG.getNode(Opc, MVT::i32, Cmp.getOperand(0), Cmp.getOperand(1));
+ addTest = false;
+ }
+ }
+
+ if (addTest) {
+ CC = DAG.getConstant(X86::COND_NE, MVT::i8);
+ Cond = DAG.getNode(X86ISD::CMP_NEW, MVT::i32, Cond,
+ DAG.getConstant(0, MVT::i8));
+ }
+ return DAG.getNode(X86ISD::BRCOND_NEW, Op.getValueType(),
+ Chain, Dest, CC, Cond);
+}
+
SDOperand X86TargetLowering::LowerCALL(SDOperand Op, SelectionDAG &DAG) {
unsigned CallingConv= cast<ConstantSDNode>(Op.getOperand(1))->getValue();
SDOperand RHS = Op.getOperand(2);
translateX86CC(CC, true, X86CC, LHS, RHS, DAG);
- const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
- SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
- SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
- VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
- SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
- SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
- return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
+ if (NewCCModeling) {
+ Opc = (Opc == X86ISD::UCOMI) ? X86ISD::UCOMI_NEW : X86ISD::COMI_NEW;
+ SDOperand Cond = DAG.getNode(Opc, MVT::i32, LHS, RHS);
+ SDOperand SetCC = DAG.getNode(X86ISD::SETCC_NEW, MVT::i8,
+ DAG.getConstant(X86CC, MVT::i8), Cond);
+ return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
+ } else {
+ const MVT::ValueType *VTs = DAG.getNodeValueTypes(MVT::Other, MVT::Flag);
+ SDOperand Ops1[] = { DAG.getEntryNode(), LHS, RHS };
+ SDOperand Cond = DAG.getNode(Opc, VTs, 2, Ops1, 3);
+ VTs = DAG.getNodeValueTypes(MVT::i8, MVT::Flag);
+ SDOperand Ops2[] = { DAG.getConstant(X86CC, MVT::i8), Cond };
+ SDOperand SetCC = DAG.getNode(X86ISD::SETCC, VTs, 2, Ops2, 2);
+ return DAG.getNode(ISD::ANY_EXTEND, MVT::i32, SetCC);
+ }
}
}
}
Function *Func = (Function *)
cast<Function>(cast<SrcValueSDNode>(Op.getOperand(5))->getValue());
unsigned CC = Func->getCallingConv();
- unsigned char NestReg;
+ unsigned NestReg;
switch (CC) {
default:
case CallingConv::X86_StdCall: {
// Pass 'nest' parameter in ECX.
// Must be kept in sync with X86CallingConv.td
- NestReg = N86::ECX;
+ NestReg = X86::ECX;
// Check that ECX wasn't needed by an 'inreg' parameter.
const FunctionType *FTy = Func->getFunctionType();
case CallingConv::X86_FastCall:
// Pass 'nest' parameter in EAX.
// Must be kept in sync with X86CallingConv.td
- NestReg = N86::EAX;
+ NestReg = X86::EAX;
break;
}
+ const X86InstrInfo *TII =
+ ((X86TargetMachine&)getTargetMachine()).getInstrInfo();
+
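+ // The 32-bit trampoline is 10 bytes: a 5-byte "movl $nest, %reg"
+ // (0xB8+reg, imm32) followed by a 5-byte "jmp rel32" (0xE9, rel32).
+ // The jump displacement is relative to the end of the trampoline,
+ // i.e. Trmp + 10.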
SDOperand OutChains[4];
SDOperand Addr, Disp;
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(10, MVT::i32));
Disp = DAG.getNode(ISD::SUB, MVT::i32, FPtr, Addr);
- const unsigned char MOV32ri = 0xB8;
- const unsigned char JMP = 0xE9;
-
- OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|NestReg, MVT::i8),
+ unsigned char MOV32ri = TII->getBaseOpcodeFor(X86::MOV32ri);
+ unsigned char N86Reg =
+ ((X86RegisterInfo*)RegInfo)->getX86RegNum(NestReg);
+ OutChains[0] = DAG.getStore(Root, DAG.getConstant(MOV32ri|N86Reg, MVT::i8),
Trmp, TrmpSV->getValue(), TrmpSV->getOffset());
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(1, MVT::i32));
OutChains[1] = DAG.getStore(Root, Nest, Addr, TrmpSV->getValue(),
TrmpSV->getOffset() + 1, false, 1);
+ unsigned char JMP = TII->getBaseOpcodeFor(X86::JMP);
Addr = DAG.getNode(ISD::ADD, MVT::i32, Trmp, DAG.getConstant(5, MVT::i32));
OutChains[2] = DAG.getStore(Root, DAG.getConstant(JMP, MVT::i8), Addr,
TrmpSV->getValue(), TrmpSV->getOffset() + 5);
OutChains[3] = DAG.getStore(Root, Disp, Addr, TrmpSV->getValue(),
TrmpSV->getOffset() + 6, false, 1);
- return DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4);
+ SDOperand Ops[] =
+ { Trmp, DAG.getNode(ISD::TokenFactor, MVT::Other, OutChains, 4) };
+ return DAG.getNode(ISD::MERGE_VALUES, Op.Val->getVTList(), Ops, 2);
}
}
case ISD::SHL_PARTS:
case ISD::SRA_PARTS:
case ISD::SRL_PARTS: return LowerShift(Op, DAG);
+ case ISD::SDIV:
+ case ISD::UDIV:
+ case ISD::SREM:
+ case ISD::UREM: return LowerIntegerDivOrRem(Op, DAG);
case ISD::SINT_TO_FP: return LowerSINT_TO_FP(Op, DAG);
case ISD::FP_TO_SINT: return LowerFP_TO_SINT(Op, DAG);
case ISD::FABS: return LowerFABS(Op, DAG);
case ISD::FNEG: return LowerFNEG(Op, DAG);
case ISD::FCOPYSIGN: return LowerFCOPYSIGN(Op, DAG);
- case ISD::SETCC: return LowerSETCC(Op, DAG, DAG.getEntryNode());
- case ISD::SELECT: return LowerSELECT(Op, DAG);
- case ISD::BRCOND: return LowerBRCOND(Op, DAG);
+ case ISD::SETCC: return NewCCModeling
+ ? LowerSETCC_New(Op, DAG)
+ : LowerSETCC(Op, DAG, DAG.getEntryNode());
+ case ISD::SELECT: return NewCCModeling
+ ? LowerSELECT_New(Op, DAG)
+ : LowerSELECT(Op, DAG);
+ case ISD::BRCOND: return NewCCModeling
+ ? LowerBRCOND_New(Op, DAG)
+ : LowerBRCOND(Op, DAG);
case ISD::JumpTable: return LowerJumpTable(Op, DAG);
case ISD::CALL: return LowerCALL(Op, DAG);
case ISD::RET: return LowerRET(Op, DAG);
case X86ISD::TAILCALL: return "X86ISD::TAILCALL";
case X86ISD::RDTSC_DAG: return "X86ISD::RDTSC_DAG";
case X86ISD::CMP: return "X86ISD::CMP";
+ case X86ISD::CMP_NEW: return "X86ISD::CMP_NEW";
case X86ISD::COMI: return "X86ISD::COMI";
+ case X86ISD::COMI_NEW: return "X86ISD::COMI_NEW";
case X86ISD::UCOMI: return "X86ISD::UCOMI";
+ case X86ISD::UCOMI_NEW: return "X86ISD::UCOMI_NEW";
case X86ISD::SETCC: return "X86ISD::SETCC";
+ case X86ISD::SETCC_NEW: return "X86ISD::SETCC_NEW";
case X86ISD::CMOV: return "X86ISD::CMOV";
+ case X86ISD::CMOV_NEW: return "X86ISD::CMOV_NEW";
case X86ISD::BRCOND: return "X86ISD::BRCOND";
+ case X86ISD::BRCOND_NEW: return "X86ISD::BRCOND_NEW";
case X86ISD::RET_FLAG: return "X86ISD::RET_FLAG";
case X86ISD::REP_STOS: return "X86ISD::REP_STOS";
case X86ISD::REP_MOVS: return "X86ISD::REP_MOVS";
case X86ISD::TLSADDR: return "X86ISD::TLSADDR";
case X86ISD::THREAD_POINTER: return "X86ISD::THREAD_POINTER";
case X86ISD::EH_RETURN: return "X86ISD::EH_RETURN";
+ case X86ISD::DIV: return "X86ISD::DIV";
+ case X86ISD::IDIV: return "X86ISD::IDIV";
}
}
case X86::CMOV_FR64:
case X86::CMOV_V4F32:
case X86::CMOV_V2F64:
- case X86::CMOV_V2I64: {
+ case X86::CMOV_V2I64:
+
+ case X86::NEW_CMOV_FR32:
+ case X86::NEW_CMOV_FR64:
+ case X86::NEW_CMOV_V4F32:
+ case X86::NEW_CMOV_V2F64:
+ case X86::NEW_CMOV_V2I64: {
// To "insert" a SELECT_CC instruction, we actually have to insert the
// diamond control-flow pattern. The incoming instruction knows the
// destination vreg to set, the condition code register to branch on, the
switch (Opc) {
default: break;
case X86ISD::SETCC:
+ case X86ISD::SETCC_NEW:
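+ // SETCC_NEW, like SETCC, produces 0 or 1, so every bit above the low
+ // bit of the result is known to be zero.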
KnownZero |= (MVT::getIntVTBitMask(Op.getValueType()) ^ 1ULL);
break;
}
// GCC calls "st(0)" just plain "st".
if (StringsEqualNoCase("{st}", Constraint)) {
Res.first = X86::ST0;
- Res.second = X86::RSTRegisterClass;
+ Res.second = X86::RFP80RegisterClass;
}
return Res;