setOperationAction(ISD::FP_TO_UINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i8 , Promote);
setOperationAction(ISD::FP_TO_UINT , MVT::i16 , Promote);
- setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
+
+ if (!X86ScalarSSE)
+ setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Promote);
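+ // With scalar SSE, i32 FP_TO_UINT is expanded instead; see the Expand
+ // entry registered below.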
// Promote i1/i8/i16 FP_TO_SINT to a larger FP_TO_SINT, as X86 doesn't have
// this operation.
setOperationAction(ISD::FP_TO_SINT , MVT::i1 , Promote);
setOperationAction(ISD::FP_TO_SINT , MVT::i8 , Promote);
+ setOperationAction(ISD::FP_TO_SINT , MVT::i16 , Promote);
setOperationAction(ISD::BRCONDTWOWAY , MVT::Other, Expand);
+ setOperationAction(ISD::BRTWOWAY_CC , MVT::Other, Expand);
setOperationAction(ISD::MEMMOVE , MVT::Other, Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16 , Expand);
setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1 , Expand);
setOperationAction(ISD::FP_ROUND_INREG , MVT::f32 , Expand);
setOperationAction(ISD::SEXTLOAD , MVT::i1 , Expand);
- setOperationAction(ISD::SREM , MVT::f64 , Expand);
+ setOperationAction(ISD::FREM , MVT::f64 , Expand);
setOperationAction(ISD::CTPOP , MVT::i8 , Expand);
setOperationAction(ISD::CTTZ , MVT::i8 , Expand);
setOperationAction(ISD::CTLZ , MVT::i8 , Expand);
setOperationAction(ISD::SINT_TO_FP, MVT::i16, Promote);
setOperationAction(ISD::FP_TO_SINT, MVT::i16, Promote);
+ // Expand FP_TO_UINT into a select.
+ // FIXME: We would like to use a Custom expander here eventually to do
+ // the optimal thing for SSE vs. the default expansion in the legalizer.
+ setOperationAction(ISD::FP_TO_UINT , MVT::i32 , Expand);
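+ // The default expansion is roughly:
+ //   if (x < 2^31)  result = (int32)x;
+ //   else           result = (int32)(x - 2^31) ^ 0x80000000;
+ // i.e. a SETCC feeding a SELECT of two conversion sequences.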
+
// We don't support sin/cos/fmod; expand fabs and fneg as well.
setOperationAction(ISD::FSIN , MVT::f64, Expand);
setOperationAction(ISD::FCOS , MVT::f64, Expand);
setOperationAction(ISD::FABS , MVT::f64, Expand);
setOperationAction(ISD::FNEG , MVT::f64, Expand);
- setOperationAction(ISD::SREM , MVT::f64, Expand);
+ setOperationAction(ISD::FREM , MVT::f64, Expand);
setOperationAction(ISD::FSIN , MVT::f32, Expand);
setOperationAction(ISD::FCOS , MVT::f32, Expand);
setOperationAction(ISD::FABS , MVT::f32, Expand);
setOperationAction(ISD::FNEG , MVT::f32, Expand);
- setOperationAction(ISD::SREM , MVT::f32, Expand);
+ setOperationAction(ISD::FREM , MVT::f32, Expand);
addLegalFPImmediate(+0.0); // xorps / xorpd
} else {
maxStoresPerMemSet = 8; // For %llvm.memset -> sequence of stores
maxStoresPerMemCpy = 8; // For %llvm.memcpy -> sequence of stores
maxStoresPerMemMove = 8; // For %llvm.memmove -> sequence of stores
- allowUnalignedStores = true; // x86 supports it!
+ allowUnalignedMemoryAccesses = true; // x86 supports it!
}
// Return the number of bytes that a function should pop when it returns (in
// addition to the space used by the return address).
// Arguments go on the stack in reverse order, as specified by the ABI.
unsigned ArgOffset = 0;
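+ // Grab the incoming stack pointer. getCopyFromReg takes the chain as its
+ // first operand; the copied value is result 0 and the new chain is result 1.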
- SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
- DAG.getEntryNode());
+ SDOperand StackPtr = DAG.getCopyFromReg(DAG.getEntryNode(),
+ X86::ESP, MVT::i32);
std::vector<SDOperand> Stores;
for (unsigned i = 0, e = Args.size(); i != e; ++i) {
if (!I->use_empty()) {
unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DL : X86::AL,
X86::R8RegisterClass);
- ArgValue = DAG.getCopyFromReg(VReg, MVT::i8, DAG.getRoot());
+ ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i8);
DAG.setRoot(ArgValue.getValue(1));
}
++NumIntRegs;
if (!I->use_empty()) {
unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::DX : X86::AX,
X86::R16RegisterClass);
- ArgValue = DAG.getCopyFromReg(VReg, MVT::i16, DAG.getRoot());
+ ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i16);
DAG.setRoot(ArgValue.getValue(1));
}
++NumIntRegs;
if (!I->use_empty()) {
unsigned VReg = AddLiveIn(MF, NumIntRegs ? X86::EDX : X86::EAX,
X86::R32RegisterClass);
- ArgValue = DAG.getCopyFromReg(VReg, MVT::i32, DAG.getRoot());
+ ArgValue = DAG.getCopyFromReg(DAG.getRoot(), VReg, MVT::i32);
DAG.setRoot(ArgValue.getValue(1));
}
++NumIntRegs;
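+ // An i64 argument arriving in registers is split across EAX (low half)
+ // and EDX (high half). Chain the two copies and rebuild the i64 value
+ // with BUILD_PAIR.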
unsigned BotReg = AddLiveIn(MF, X86::EAX, X86::R32RegisterClass);
unsigned TopReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
- SDOperand Low=DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
- SDOperand Hi =DAG.getCopyFromReg(TopReg, MVT::i32, Low.getValue(1));
+ SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
+ SDOperand Hi = DAG.getCopyFromReg(Low.getValue(1), TopReg, MVT::i32);
DAG.setRoot(Hi.getValue(1));
ArgValue = DAG.getNode(ISD::BUILD_PAIR, MVT::i64, Low, Hi);
} else if (NumIntRegs == 1) {
if (!I->use_empty()) {
unsigned BotReg = AddLiveIn(MF, X86::EDX, X86::R32RegisterClass);
- SDOperand Low = DAG.getCopyFromReg(BotReg, MVT::i32, DAG.getRoot());
+ SDOperand Low = DAG.getCopyFromReg(DAG.getRoot(), BotReg, MVT::i32);
DAG.setRoot(Low.getValue(1));
// Load the high part from memory.
// Arguments go on the stack in reverse order, as specified by the ABI.
unsigned ArgOffset = 0;
- SDOperand StackPtr = DAG.getCopyFromReg(X86::ESP, MVT::i32,
- DAG.getEntryNode());
+ SDOperand StackPtr = DAG.getCopyFromReg(DAG.getEntryNode(),
+ X86::ESP, MVT::i32);
NumIntRegs = 0;
std::vector<SDOperand> Stores;
std::vector<SDOperand> RegValuesToPass;
const X86Subtarget *Subtarget;
public:
ISel(TargetMachine &TM) : SelectionDAGISel(X86Lowering), X86Lowering(TM) {
- Subtarget = TM.getSubtarget<const X86Subtarget>();
+ Subtarget = &TM.getSubtarget<X86Subtarget>();
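+ // getSubtarget returns a reference, so keep a pointer to it.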
}
virtual const char *getPassName() const {
}
void ISel::EmitFunctionEntryCode(Function &Fn, MachineFunction &MF) {
- // If this function has live-in values, emit the copies from pregs to vregs at
- // the top of the function, before anything else.
- MachineBasicBlock *BB = MF.begin();
- if (MF.livein_begin() != MF.livein_end()) {
- SSARegMap *RegMap = MF.getSSARegMap();
- for (MachineFunction::livein_iterator LI = MF.livein_begin(),
- E = MF.livein_end(); LI != E; ++LI) {
- const TargetRegisterClass *RC = RegMap->getRegClass(LI->second);
- if (RC == X86::R8RegisterClass) {
- BuildMI(BB, X86::MOV8rr, 1, LI->second).addReg(LI->first);
- } else if (RC == X86::R16RegisterClass) {
- BuildMI(BB, X86::MOV16rr, 1, LI->second).addReg(LI->first);
- } else if (RC == X86::R32RegisterClass) {
- BuildMI(BB, X86::MOV32rr, 1, LI->second).addReg(LI->first);
- } else if (RC == X86::RFPRegisterClass) {
- BuildMI(BB, X86::FpMOV, 1, LI->second).addReg(LI->first);
- } else if (RC == X86::RXMMRegisterClass) {
- BuildMI(BB, X86::MOVAPDrr, 1, LI->second).addReg(LI->first);
- } else {
- assert(0 && "Unknown regclass!");
- }
- }
- }
-
-
// If this is main, emit special code for main.
+ MachineBasicBlock *BB = MF.begin();
if (Fn.hasExternalLinkage() && Fn.getName() == "main")
EmitSpecialCodeForMain(BB, MF.getFrameInfo());
}
return false;
}
- SetCCSDNode *SetCC = dyn_cast<SetCCSDNode>(Cond);
- if (SetCC == 0)
+ if (Cond.getOpcode() != ISD::SETCC)
return true; // Can only handle simple setcc's so far.
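+ // A SETCC node's operands are (LHS, RHS, condition code); the condition
+ // lives in operand 2.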
+ ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
unsigned Opc;
// Handle integer conditions first.
- if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
- switch (SetCC->getCondition()) {
+ if (MVT::isInteger(Cond.getOperand(0).getValueType())) {
+ switch (CC) {
default: assert(0 && "Illegal integer SetCC!");
case ISD::SETEQ: Opc = X86::JE; break;
case ISD::SETGT: Opc = X86::JG; break;
case ISD::SETUGE: Opc = X86::JAE; break;
}
Select(Chain);
- EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
+ EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.hasOneUse());
BuildMI(BB, Opc, 1).addMBB(Dest);
return false;
}
// 1 | 0 | 0 | X == Y
// 1 | 1 | 1 | unordered
//
- switch (SetCC->getCondition()) {
+ switch (CC) {
default: assert(0 && "Invalid FP setcc!");
case ISD::SETUEQ:
case ISD::SETEQ: Opc = X86::JE; break; // True if ZF = 1
}
Select(Chain);
- EmitCMP(SetCC->getOperand(0), SetCC->getOperand(1), SetCC->hasOneUse());
+ EmitCMP(Cond.getOperand(0), Cond.getOperand(1), Cond.hasOneUse());
BuildMI(BB, Opc, 1).addMBB(Dest);
if (Opc2)
BuildMI(BB, Opc2, 1).addMBB(Dest);
/*CMPNLE*/ 6, /*CMPNLT*/ 5, /*CMPUNORD*/ 3, /*CMPORD*/ 7
};
- SetCCSDNode *SetCC;
- if ((SetCC = dyn_cast<SetCCSDNode>(Cond))) {
- if (MVT::isInteger(SetCC->getOperand(0).getValueType())) {
- switch (SetCC->getCondition()) {
+ if (Cond.getOpcode() == ISD::SETCC) {
+ ISD::CondCode CC = cast<CondCodeSDNode>(Cond.getOperand(2))->get();
+ if (MVT::isInteger(Cond.getOperand(0).getValueType())) {
+ switch (CC) {
default: assert(0 && "Unknown integer comparison!");
case ISD::SETEQ: CondCode = EQ; break;
case ISD::SETGT: CondCode = GT; break;
// 1 | 0 | 0 | X == Y
// 1 | 1 | 1 | unordered
//
- switch (SetCC->getCondition()) {
+ switch (CC) {
default: assert(0 && "Unknown FP comparison!");
case ISD::SETUEQ:
case ISD::SETEQ: CondCode = EQ; break; // True if ZF = 1
break;
}
}
- }
+
- // There's no SSE equivalent of FCMOVE. For cases where we set a condition
- // code above and one of the results of the select is +0.0, then we can fake
- // it up through a clever AND with mask. Otherwise, we will fall through to
- // the code below that will use a PHI node to select the right value.
- if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
- if (SetCC && SetCC->getOperand(0).getValueType() == SVT &&
- NOT_SET != CondCode) {
- ConstantFPSDNode *CT = dyn_cast<ConstantFPSDNode>(True);
- ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(False);
- bool TrueZero = CT && CT->isExactlyValue(0.0);
- bool FalseZero = CF && CF->isExactlyValue(0.0);
- if (TrueZero || FalseZero) {
- SDOperand LHS = Cond.getOperand(0);
- SDOperand RHS = Cond.getOperand(1);
-
- // Select the two halves of the condition
- unsigned RLHS, RRHS;
- if (getRegPressure(LHS) > getRegPressure(RHS)) {
- RLHS = SelectExpr(LHS);
- RRHS = SelectExpr(RHS);
- } else {
- RRHS = SelectExpr(RHS);
- RLHS = SelectExpr(LHS);
- }
-
- // Emit the comparison and generate a mask from it
- unsigned MaskReg = MakeReg(SVT);
- unsigned Opc = (SVT == MVT::f32) ? X86::CMPSSrr : X86::CMPSDrr;
- BuildMI(BB, Opc, 3, MaskReg).addReg(RLHS).addReg(RRHS)
- .addImm(SSE_CMOVTAB[CondCode]);
-
- if (TrueZero) {
- RFalse = SelectExpr(False);
- Opc = (SVT == MVT::f32) ? X86::ANDNPSrr : X86::ANDNPDrr;
- BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RFalse);
- } else {
- RTrue = SelectExpr(True);
- Opc = (SVT == MVT::f32) ? X86::ANDPSrr : X86::ANDPDrr;
- BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RTrue);
+ // There's no SSE equivalent of FCMOVE. For cases where we set a condition
+ // code above and one of the results of the select is +0.0, then we can fake
+ // it up through a clever AND with mask. Otherwise, we will fall through to
+ // the code below that will use a PHI node to select the right value.
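+      // For example, (select (setlt X, Y), +0.0, Z) becomes:
+      //   mask = CMPSS(lt) X, Y    ; all-ones if X < Y, else all-zeros
+      //   res  = ANDNPS mask, Z    ; +0.0 when the mask is set, Z otherwise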
+ if (X86ScalarSSE && (SVT == MVT::f32 || SVT == MVT::f64)) {
+ if (Cond.getOperand(0).getValueType() == SVT &&
+ NOT_SET != CondCode) {
+ ConstantFPSDNode *CT = dyn_cast<ConstantFPSDNode>(True);
+ ConstantFPSDNode *CF = dyn_cast<ConstantFPSDNode>(False);
+ bool TrueZero = CT && CT->isExactlyValue(0.0);
+ bool FalseZero = CF && CF->isExactlyValue(0.0);
+ if (TrueZero || FalseZero) {
+ SDOperand LHS = Cond.getOperand(0);
+ SDOperand RHS = Cond.getOperand(1);
+
+ // Select the two halves of the condition
+ unsigned RLHS, RRHS;
+ if (getRegPressure(LHS) > getRegPressure(RHS)) {
+ RLHS = SelectExpr(LHS);
+ RRHS = SelectExpr(RHS);
+ } else {
+ RRHS = SelectExpr(RHS);
+ RLHS = SelectExpr(LHS);
+ }
+
+ // Emit the comparison and generate a mask from it
+ unsigned MaskReg = MakeReg(SVT);
+ unsigned Opc = (SVT == MVT::f32) ? X86::CMPSSrr : X86::CMPSDrr;
+ BuildMI(BB, Opc, 3, MaskReg).addReg(RLHS).addReg(RRHS)
+ .addImm(SSE_CMOVTAB[CondCode]);
+
+ if (TrueZero) {
+ RFalse = SelectExpr(False);
+ Opc = (SVT == MVT::f32) ? X86::ANDNPSrr : X86::ANDNPDrr;
+ BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RFalse);
+ } else {
+ RTrue = SelectExpr(True);
+ Opc = (SVT == MVT::f32) ? X86::ANDPSrr : X86::ANDPDrr;
+ BuildMI(BB, Opc, 2, RDest).addReg(MaskReg).addReg(RTrue);
+ }
+ return;
}
- return;
}
}
}
unsigned ISel::SelectExpr(SDOperand N) {
unsigned Result;
- unsigned Tmp1, Tmp2, Tmp3;
- unsigned Opc = 0;
+ unsigned Tmp1 = 0, Tmp2 = 0, Tmp3 = 0, Opc = 0;
SDNode *Node = N.Val;
SDOperand Op0, Op1;
if (Node->getOpcode() == ISD::CopyFromReg) {
- if (MRegisterInfo::isVirtualRegister(cast<RegSDNode>(Node)->getReg()) ||
- cast<RegSDNode>(Node)->getReg() == X86::ESP) {
- // Just use the specified register as our input.
- return cast<RegSDNode>(Node)->getReg();
- }
+ unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
+ // Just use the specified register as our input if we can.
+ if (MRegisterInfo::isVirtualRegister(Reg) || Reg == X86::ESP)
+ return Reg;
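+ // Any other physical register falls through and gets copied into a
+ // fresh virtual register below.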
}
unsigned &Reg = ExprMap[N];
Reg = Result = ExprMap[N.getValue(0)] =
MakeReg(N.getValue(0).getValueType());
}
+ Tmp1 = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
switch (Node->getValueType(0)) {
default: assert(0 && "Cannot CopyFromReg this!");
case MVT::i1:
case MVT::i8:
- BuildMI(BB, X86::MOV8rr, 1,
- Result).addReg(cast<RegSDNode>(Node)->getReg());
+ BuildMI(BB, X86::MOV8rr, 1, Result).addReg(Tmp1);
return Result;
case MVT::i16:
- BuildMI(BB, X86::MOV16rr, 1,
- Result).addReg(cast<RegSDNode>(Node)->getReg());
+ BuildMI(BB, X86::MOV16rr, 1, Result).addReg(Tmp1);
return Result;
case MVT::i32:
- BuildMI(BB, X86::MOV32rr, 1,
- Result).addReg(cast<RegSDNode>(Node)->getReg());
+ BuildMI(BB, X86::MOV32rr, 1, Result).addReg(Tmp1);
return Result;
}
addFrameReference(BuildMI(BB, X86::LEA32r, 4, Result), (int)Tmp1);
return Result;
case ISD::ConstantPool:
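+ // The node carries the Constant* itself; look up (or create) its index
+ // in the function's MachineConstantPool.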
- Tmp1 = cast<ConstantPoolSDNode>(N)->getIndex();
+ Tmp1 = BB->getParent()->getConstantPool()->
+ getConstantPoolIndex(cast<ConstantPoolSDNode>(N)->get());
addConstantPoolReference(BuildMI(BB, X86::LEA32r, 4, Result), Tmp1);
return Result;
case ISD::ConstantFP:
BuildMI(BB, X86::MOV32ri, 1, Result).addExternalSymbol(Sym);
return Result;
}
+ case ISD::ANY_EXTEND: // treat any extend like zext
case ISD::ZERO_EXTEND: {
int DestIs16 = N.getValueType() == MVT::i16;
int SrcIs16 = N.getOperand(0).getValueType() == MVT::i16;
}
return Result;
+ case ISD::FADD:
case ISD::ADD:
Op0 = N.getOperand(0);
Op1 = N.getOperand(1);
return Result;
}
+ case ISD::FSUB:
+ case ISD::FMUL:
case ISD::SUB:
case ISD::MUL:
case ISD::AND:
}
switch (Node->getOpcode()) {
default: assert(0 && "Unreachable!");
+ case ISD::FSUB:
case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
+ case ISD::FMUL:
case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
case ISD::AND: Opc = ANDTab[Opc]; break;
case ISD::OR: Opc = ORTab[Opc]; break;
}
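+ // If the load is foldable, commute the operands so the load ends up on
+ // the right; SUB and FSUB are not commutative, so they take the else path.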
if (isFoldableLoad(Op0, Op1, true))
- if (Node->getOpcode() != ISD::SUB) {
+ if (Node->getOpcode() != ISD::SUB && Node->getOpcode() != ISD::FSUB) {
std::swap(Op0, Op1);
goto FoldOps;
} else {
}
switch (Node->getOpcode()) {
default: assert(0 && "Unreachable!");
+ case ISD::FSUB:
case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
+ case ISD::FMUL:
case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
case ISD::AND: Opc = ANDTab[Opc]; break;
case ISD::OR: Opc = ORTab[Opc]; break;
}
switch (Node->getOpcode()) {
default: assert(0 && "Unreachable!");
+ case ISD::FSUB:
case ISD::SUB: Opc = X86ScalarSSE ? SSE_SUBTab[Opc] : SUBTab[Opc]; break;
+ case ISD::FMUL:
case ISD::MUL: Opc = X86ScalarSSE ? SSE_MULTab[Opc] : MULTab[Opc]; break;
case ISD::AND: Opc = ANDTab[Opc]; break;
case ISD::OR: Opc = ORTab[Opc]; break;
N.getValueType(), Result);
return Result;
+ case ISD::FDIV:
+ case ISD::FREM:
case ISD::SDIV:
case ISD::UDIV:
case ISD::SREM:
assert((N.getOpcode() != ISD::SREM || MVT::isInteger(N.getValueType())) &&
"We don't support this operator!");
- if (N.getOpcode() == ISD::SDIV) {
+ if (N.getOpcode() == ISD::SDIV || N.getOpcode() == ISD::FDIV) {
// We can fold loads into FpDIVs, but not really into any others.
if (N.getValueType() == MVT::f64 && !X86ScalarSSE) {
// Check for reversed and unreversed DIV.
case MVT::i32: Opc = X86::SHL32rCL; break;
}
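+ // Variable shifts read their count implicitly from CL: copy the shift
+ // amount into CL and emit the one-operand rCL form.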
BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
- BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
+ BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
return Result;
case ISD::SRL:
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
case MVT::i32: Opc = X86::SHR32rCL; break;
}
BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
- BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
+ BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
return Result;
case ISD::SRA:
if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N.getOperand(1))) {
case MVT::i32: Opc = X86::SAR32rCL; break;
}
BuildMI(BB, X86::MOV8rr, 1, X86::CL).addReg(Tmp2);
- BuildMI(BB, Opc, 2, Result).addReg(Tmp1).addReg(Tmp2);
+ BuildMI(BB, Opc, 1, Result).addReg(Tmp1);
return Result;
case ISD::SETCC:
EmitCMP(N.getOperand(0), N.getOperand(1), Node->hasOneUse());
- EmitSetCC(BB, Result, cast<SetCCSDNode>(N)->getCondition(),
+ EmitSetCC(BB, Result, cast<CondCodeSDNode>(N.getOperand(2))->get(),
MVT::isFloatingPoint(N.getOperand(1).getValueType()));
return Result;
case ISD::LOAD:
}
if (ConstantPoolSDNode *CP = dyn_cast<ConstantPoolSDNode>(N.getOperand(1))){
+ unsigned CPIdx = BB->getParent()->getConstantPool()->
+ getConstantPoolIndex(CP->get());
Select(N.getOperand(0));
- addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CP->getIndex());
+ addConstantPoolReference(BuildMI(BB, Opc, 4, Result), CPIdx);
} else {
X86AddressMode AM;
if (Node->getValueType(0) == MVT::f64) {
assert(cast<VTSDNode>(Node->getOperand(3))->getVT() == MVT::f32 &&
"Bad EXTLOAD!");
- addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result),
- CP->getIndex());
+ unsigned CPIdx = BB->getParent()->getConstantPool()->
+ getConstantPoolIndex(CP->get());
+
+ addConstantPoolReference(BuildMI(BB, X86::FLD32m, 4, Result), CPIdx);
return Result;
}
default:
std::cerr << "CANNOT [mem] op= val: ";
StVal.Val->dump(); std::cerr << "\n";
+ case ISD::FMUL:
case ISD::MUL:
+ case ISD::FDIV:
case ISD::SDIV:
case ISD::UDIV:
+ case ISD::FREM:
case ISD::SREM:
case ISD::UREM: return false;
// If we have [mem] = V op [mem], try to turn it into:
// [mem] = [mem] op V.
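+ // This is only legal for commutative operators; subtracts and shifts
+ // must keep their operand order.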
- if (Op1 == TheLoad && StVal.getOpcode() != ISD::SUB &&
+ if (Op1 == TheLoad &&
+ StVal.getOpcode() != ISD::SUB && StVal.getOpcode() != ISD::FSUB &&
StVal.getOpcode() != ISD::SHL && StVal.getOpcode() != ISD::SRA &&
StVal.getOpcode() != ISD::SRL)
std::swap(Op0, Op1);
SelectionDAG &DAG) {
MVT::ValueType StoreVT;
switch (Chain.getOpcode()) {
+ default: assert(0 && "Unexpected node!");
case ISD::CALLSEQ_START:
// If we found the start of the call sequence, we're done. We actually
// strip off the CALLSEQ_START node, to avoid generating the
if (OrigDest.getOpcode() == ISD::CopyFromReg) {
OrigOffset = 0;
- assert(cast<RegSDNode>(OrigDest)->getReg() == X86::ESP);
+ assert(cast<RegisterSDNode>(OrigDest.getOperand(1))->getReg() == X86::ESP);
} else {
// We expect only (ESP+C)
assert(OrigDest.getOpcode() == ISD::ADD &&
isa<ConstantSDNode>(OrigDest.getOperand(1)) &&
OrigDest.getOperand(0).getOpcode() == ISD::CopyFromReg &&
- cast<RegSDNode>(OrigDest.getOperand(0))->getReg() == X86::ESP);
+ cast<RegisterSDNode>(OrigDest.getOperand(0).getOperand(1))->getReg()
+ == X86::ESP);
OrigOffset = cast<ConstantSDNode>(OrigDest.getOperand(1))->getValue();
}
void ISel::Select(SDOperand N) {
- unsigned Tmp1, Tmp2, Opc;
+ unsigned Tmp1 = 0, Tmp2 = 0, Opc = 0;
if (!ExprMap.insert(std::make_pair(N, 1)).second)
return; // Already selected.
}
return;
case ISD::CopyToReg:
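+ // CopyToReg operands are (chain, register, value). Select the chain and
+ // the value (operand 2) in register-pressure order, then copy into the
+ // target register (operand 1) if it differs.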
- if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(1))) {
+ if (getRegPressure(N.getOperand(0)) > getRegPressure(N.getOperand(2))) {
Select(N.getOperand(0));
- Tmp1 = SelectExpr(N.getOperand(1));
+ Tmp1 = SelectExpr(N.getOperand(2));
} else {
- Tmp1 = SelectExpr(N.getOperand(1));
+ Tmp1 = SelectExpr(N.getOperand(2));
Select(N.getOperand(0));
}
- Tmp2 = cast<RegSDNode>(N)->getReg();
+ Tmp2 = cast<RegisterSDNode>(N.getOperand(1))->getReg();
if (Tmp1 != Tmp2) {
- switch (N.getOperand(1).getValueType()) {
+ switch (N.getOperand(2).getValueType()) {
default: assert(0 && "Invalid type for operation!");
case MVT::i1:
case MVT::i8: Opc = X86::MOV8rr; break;