addLegalAddressScale(3);
// Set up the register classes.
- addRegisterClass(MVT::i8, X86::R8RegisterClass);
- addRegisterClass(MVT::i16, X86::R16RegisterClass);
- addRegisterClass(MVT::i32, X86::R32RegisterClass);
+ addRegisterClass(MVT::i8, X86::GR8RegisterClass);
+ addRegisterClass(MVT::i16, X86::GR16RegisterClass);
+ addRegisterClass(MVT::i32, X86::GR32RegisterClass);
// Promote all UINT_TO_FP to larger SINT_TO_FP's, as X86 doesn't have this
// operation.
addRegisterClass(MVT::f32, X86::FR32RegisterClass);
addRegisterClass(MVT::f64, X86::FR64RegisterClass);
- // SSE has no load+extend ops
- setOperationAction(ISD::EXTLOAD, MVT::f32, Expand);
- setOperationAction(ISD::ZEXTLOAD, MVT::f32, Expand);
-
// Use ANDPD to simulate FABS.
setOperationAction(ISD::FABS , MVT::f64, Custom);
setOperationAction(ISD::FABS , MVT::f32, Custom);
FormalArgs.clear();
FormalArgLocs.clear();
- // This sets BytesToPopOnReturn, BytesCallerReserves, etc. which have to be set
- // before the rest of the function can be lowered.
+ // This sets BytesToPopOnReturn, BytesCallerReserves, etc. which have to be
+ // set before the rest of the function can be lowered.
if (F.getCallingConv() == CallingConv::Fast && EnableFastCC)
PreprocessFastCCArguments(Args, F, DAG);
else
}
void X86TargetLowering::LowerCCCArguments(SDOperand Op, SelectionDAG &DAG) {
- unsigned NumArgs = Op.Val->getNumValues();
+ unsigned NumArgs = Op.Val->getNumValues() - 1;
MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
for (unsigned i = 0; i < NumArgs; ++i) {
std::pair<FALocInfo, FALocInfo> Loc = FormalArgLocs[i];
SDOperand ArgValue;
if (Loc.first.Kind == FALocInfo::StackFrameLoc) {
- // Create the SelectionDAG nodes corresponding to a load from this parameter
+ // Create the SelectionDAG nodes corresponding to a load from this
+ // parameter.
unsigned FI = FormalArgLocs[i].first.Loc;
SDOperand FIN = DAG.getFrameIndex(FI, MVT::i32);
ArgValue = DAG.getLoad(Op.Val->getValueType(i),
}
FormalArgs.push_back(ArgValue);
}
+ // Provide a chain. Note that this isn't the right one, but it works as well
+ // as before.
+ FormalArgs.push_back(DAG.getEntryNode());
}
std::pair<SDOperand, SDOperand>
unsigned CCReg = XMMArgRegs[i];
SDOperand RegToPass = RegValuesToPass[i];
assert(RegToPass.getValueType() == MVT::Vector);
- unsigned NumElems = cast<ConstantSDNode>(*(RegToPass.Val->op_end()-2))->getValue();
+ unsigned NumElems =
+ cast<ConstantSDNode>(*(RegToPass.Val->op_end()-2))->getValue();
MVT::ValueType EVT = cast<VTSDNode>(*(RegToPass.Val->op_end()-1))->getVT();
MVT::ValueType PVT = getVectorType(EVT, NumElems);
SDOperand CCRegNode = DAG.getRegister(CCReg, PVT);
case MVT::i1:
case MVT::i8:
Reg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
- X86::R8RegisterClass);
+ X86::GR8RegisterClass);
Loc.first.Kind = FALocInfo::LiveInRegLoc;
Loc.first.Loc = Reg;
Loc.first.Typ = MVT::i8;
break;
case MVT::i16:
Reg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
- X86::R16RegisterClass);
+ X86::GR16RegisterClass);
Loc.first.Kind = FALocInfo::LiveInRegLoc;
Loc.first.Loc = Reg;
Loc.first.Typ = MVT::i16;
break;
case MVT::i32:
Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
- X86::R32RegisterClass);
+ X86::GR32RegisterClass);
Loc.first.Kind = FALocInfo::LiveInRegLoc;
Loc.first.Loc = Reg;
Loc.first.Typ = MVT::i32;
break;
case MVT::i64:
Reg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
- X86::R32RegisterClass);
+ X86::GR32RegisterClass);
Loc.first.Kind = FALocInfo::LiveInRegLoc;
Loc.first.Loc = Reg;
Loc.first.Typ = MVT::i32;
if (ObjIntRegs == 2) {
- Reg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
+ Reg = AddLiveIn(MF, X86::EDX, X86::GR32RegisterClass);
Loc.second.Kind = FALocInfo::LiveInRegLoc;
Loc.second.Loc = Reg;
Loc.second.Typ = MVT::i32;
void
X86TargetLowering::LowerFastCCArguments(SDOperand Op, SelectionDAG &DAG) {
- unsigned NumArgs = Op.Val->getNumValues();
+ unsigned NumArgs = Op.Val->getNumValues()-1;
MachineFunction &MF = DAG.getMachineFunction();
- MachineFrameInfo *MFI = MF.getFrameInfo();
for (unsigned i = 0; i < NumArgs; ++i) {
MVT::ValueType VT = Op.Val->getValueType(i);
std::pair<FALocInfo, FALocInfo> Loc = FormalArgLocs[i];
SDOperand ArgValue;
if (Loc.first.Kind == FALocInfo::StackFrameLoc) {
- // Create the SelectionDAG nodes corresponding to a load from this parameter
+ // Create the SelectionDAG nodes corresponding to a load from this
+ // parameter.
SDOperand FIN = DAG.getFrameIndex(Loc.first.Loc, MVT::i32);
- ArgValue = DAG.getLoad(Op.Val->getValueType(i),DAG.getEntryNode(), FIN,
+ ArgValue = DAG.getLoad(Op.Val->getValueType(i), DAG.getEntryNode(), FIN,
DAG.getSrcValue(NULL));
} else {
// Must be a CopyFromReg
if (Loc.second.Kind != FALocInfo::None) {
SDOperand ArgValue2;
if (Loc.second.Kind == FALocInfo::StackFrameLoc) {
- // Create the SelectionDAG nodes corresponding to a load from this parameter
+ // Create the SelectionDAG nodes corresponding to a load from this
+ // parameter.
SDOperand FIN = DAG.getFrameIndex(Loc.second.Loc, MVT::i32);
- ArgValue2 = DAG.getLoad(Op.Val->getValueType(i),DAG.getEntryNode(), FIN,
- DAG.getSrcValue(NULL));
+ ArgValue2 = DAG.getLoad(Op.Val->getValueType(i), DAG.getEntryNode(),
+ FIN, DAG.getSrcValue(NULL));
} else {
// Must be a CopyFromReg
ArgValue2 = DAG.getCopyFromReg(DAG.getEntryNode(),
}
FormalArgs.push_back(ArgValue);
}
+
+ // Provide a chain. Note that this isn't the right one, but it works as well
+ // as before.
+ FormalArgs.push_back(DAG.getEntryNode());
}
std::pair<SDOperand, SDOperand>
Ops.push_back(InFlag);
// FIXME: Do not generate X86ISD::TAILCALL for now.
- Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL, NodeTys, Ops);
+ Chain = DAG.getNode(isTailCall ? X86ISD::TAILCALL : X86ISD::CALL,
+ NodeTys, Ops);
InFlag = Chain.getValue(1);
NodeTys.clear();
// Load the old value of the high byte of the control word...
unsigned OldCW =
- F->getSSARegMap()->createVirtualRegister(X86::R16RegisterClass);
+ F->getSSARegMap()->createVirtualRegister(X86::GR16RegisterClass);
addFrameReference(BuildMI(BB, X86::MOV16rm, 4, OldCW), CWFrameIdx);
// Set the high part to be round to zero...
}
}
- // Take advantage of the fact R32 to VR128 scalar_to_vector (i.e. movd)
+ // Take advantage of the fact GR32 to VR128 scalar_to_vector (i.e. movd)
// clears the upper bits.
// FIXME: we can do the same for v4f32 case when we know both parts of
// the lower half come from scalar_to_vector (loadf32). We should do
}
}
- SDOperand LoShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT, LoMask));
- SDOperand HiShuffle = DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
- DAG.getNode(ISD::BUILD_VECTOR, MaskVT, HiMask));
+ SDOperand LoShuffle =
+ DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
+ DAG.getNode(ISD::BUILD_VECTOR, MaskVT, LoMask));
+ SDOperand HiShuffle =
+ DAG.getNode(ISD::VECTOR_SHUFFLE, VT, V1, V2,
+ DAG.getNode(ISD::BUILD_VECTOR, MaskVT, HiMask));
std::vector<SDOperand> MaskOps;
for (unsigned i = 0; i != NumElems; ++i) {
if (Locs[i].first == -1) {
SDOperand
X86TargetLowering::LowerINSERT_VECTOR_ELT(SDOperand Op, SelectionDAG &DAG) {
- // Transform it so it match pinsrw which expects a 16-bit value in a R32
+ // Transform it so it matches pinsrw which expects a 16-bit value in a GR32
// as its second argument.
MVT::ValueType VT = Op.getValueType();
MVT::ValueType BaseVT = MVT::getVectorBaseType(VT);
Idx <<= 1;
if (MVT::isFloatingPoint(N1.getValueType())) {
if (N1.getOpcode() == ISD::LOAD) {
- // Just load directly from f32mem to R32.
+ // Just load directly from f32mem to GR32.
N1 = DAG.getLoad(MVT::i32, N1.getOperand(0), N1.getOperand(1),
N1.getOperand(2));
} else {
X86TargetLowering::LowerGlobalAddress(SDOperand Op, SelectionDAG &DAG) {
GlobalValue *GV = cast<GlobalAddressSDNode>(Op)->getGlobal();
SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
- DAG.getTargetGlobalAddress(GV, getPointerTy()));
+ DAG.getTargetGlobalAddress(GV,
+ getPointerTy()));
if (Subtarget->isTargetDarwin()) {
// With PIC, the address is actually $g + Offset.
if (getTargetMachine().getRelocationModel() == Reloc::PIC)
Result = DAG.getNode(ISD::ADD, getPointerTy(),
- DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
+ DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
+ Result);
// For Darwin, external and weak symbols are indirect, so we want to load
// the value at address GV, not the value of GV itself. This means that
X86TargetLowering::LowerExternalSymbol(SDOperand Op, SelectionDAG &DAG) {
const char *Sym = cast<ExternalSymbolSDNode>(Op)->getSymbol();
SDOperand Result = DAG.getNode(X86ISD::Wrapper, getPointerTy(),
- DAG.getTargetExternalSymbol(Sym, getPointerTy()));
+ DAG.getTargetExternalSymbol(Sym,
+ getPointerTy()));
if (Subtarget->isTargetDarwin()) {
// With PIC, the address is actually $g + Offset.
if (getTargetMachine().getRelocationModel() == Reloc::PIC)
Result = DAG.getNode(ISD::ADD, getPointerTy(),
- DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
+ DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
+ Result);
}
return Result;
// With PIC, the address is actually $g + Offset.
if (getTargetMachine().getRelocationModel() == Reloc::PIC)
Result = DAG.getNode(ISD::ADD, getPointerTy(),
- DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()), Result);
+ DAG.getNode(X86ISD::GlobalBaseReg, getPointerTy()),
+ Result);
}
return Result;
SDOperand
X86TargetLowering::LowerFORMAL_ARGUMENTS(SDOperand Op, SelectionDAG &DAG) {
if (FormalArgs.size() == 0) {
- unsigned CC = cast<ConstantSDNode>(Op.getOperand(0))->getValue();
+ unsigned CC = cast<ConstantSDNode>(Op.getOperand(1))->getValue();
if (CC == CallingConv::Fast && EnableFastCC)
LowerFastCCArguments(Op, DAG);
else
LowerCCCArguments(Op, DAG);
}
- return FormalArgs[Op.ResNo];
+
+ // Return the new list of results.
+ std::vector<MVT::ValueType> RetVTs(Op.Val->value_begin(),
+ Op.Val->value_end());
+ return DAG.getNode(ISD::MERGE_VALUES, RetVTs, FormalArgs);
}
SDOperand X86TargetLowering::LowerMEMSET(SDOperand Op, SelectionDAG &DAG) {
if ((Align & 3) != 0 ||
(I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
MVT::ValueType IntPtr = getPointerTy();
- const Type *IntPtrTy = getTargetData().getIntPtrType();
+ const Type *IntPtrTy = getTargetData()->getIntPtrType();
std::vector<std::pair<SDOperand, const Type*> > Args;
Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
// Extend the ubyte argument to be an int value for the call.
if ((Align & 3) != 0 ||
(I && I->getValue() < Subtarget->getMinRepStrSizeThreshold())) {
MVT::ValueType IntPtr = getPointerTy();
- const Type *IntPtrTy = getTargetData().getIntPtrType();
+ const Type *IntPtrTy = getTargetData()->getIntPtrType();
std::vector<std::pair<SDOperand, const Type*> > Args;
Args.push_back(std::make_pair(Op.getOperand(1), IntPtrTy));
Args.push_back(std::make_pair(Op.getOperand(2), IntPtrTy));
default: break; // Unknown constraint letter
case 'r': // GENERAL_REGS
case 'R': // LEGACY_REGS
- return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX,
- X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
+ if (VT == MVT::i32)
+ return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
+ X86::ESI, X86::EDI, X86::EBP, X86::ESP, 0);
+ else if (VT == MVT::i16)
+ return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
+ X86::SI, X86::DI, X86::BP, X86::SP, 0);
+ else if (VT == MVT::i8)
+ return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
+ break;
case 'l': // INDEX_REGS
- return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX,
- X86::ESI, X86::EDI, X86::EBP, 0);
+ if (VT == MVT::i32)
+ return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX,
+ X86::ESI, X86::EDI, X86::EBP, 0);
+ else if (VT == MVT::i16)
+ return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX,
+ X86::SI, X86::DI, X86::BP, 0);
+ else if (VT == MVT::i8)
+ return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
+ break;
case 'q': // Q_REGS (GENERAL_REGS in 64-bit mode)
case 'Q': // Q_REGS
- return make_vector<unsigned>(X86::EAX, X86::EBX, X86::ECX, X86::EDX, 0);
+ if (VT == MVT::i32)
+ return make_vector<unsigned>(X86::EAX, X86::EDX, X86::ECX, X86::EBX, 0);
+ else if (VT == MVT::i16)
+ return make_vector<unsigned>(X86::AX, X86::DX, X86::CX, X86::BX, 0);
+ else if (VT == MVT::i8)
+ return make_vector<unsigned>(X86::AL, X86::DL, X86::CL, X86::BL, 0);
+ break;
case 'x': // SSE_REGS if SSE1 allowed
if (Subtarget->hasSSE1())
return make_vector<unsigned>(X86::XMM0, X86::XMM1, X86::XMM2, X86::XMM3,