{ X86::DIVPSrr, X86::DIVPSrm },
{ X86::DIVSDrr, X86::DIVSDrm },
{ X86::DIVSSrr, X86::DIVSSrm },
+ { X86::FsANDNPDrr, X86::FsANDNPDrm },
+ { X86::FsANDNPSrr, X86::FsANDNPSrm },
+ { X86::FsANDPDrr, X86::FsANDPDrm },
+ { X86::FsANDPSrr, X86::FsANDPSrm },
+ { X86::FsORPDrr, X86::FsORPDrm },
+ { X86::FsORPSrr, X86::FsORPSrm },
+ { X86::FsXORPDrr, X86::FsXORPDrm },
+ { X86::FsXORPSrr, X86::FsXORPSrm },
{ X86::HADDPDrr, X86::HADDPDrm },
{ X86::HADDPSrr, X86::HADDPSrm },
{ X86::HSUBPDrr, X86::HSUBPDrm },
{ X86::PMAXUBrr, X86::PMAXUBrm },
{ X86::PMINSWrr, X86::PMINSWrm },
{ X86::PMINUBrr, X86::PMINUBrm },
+ { X86::PMULDQrr, X86::PMULDQrm },
+ { X86::PMULDQrr_int, X86::PMULDQrm_int },
{ X86::PMULHUWrr, X86::PMULHUWrm },
{ X86::PMULHWrr, X86::PMULHWrm },
+ { X86::PMULLDrr, X86::PMULLDrm },
+ { X86::PMULLDrr_int, X86::PMULLDrm_int },
{ X86::PMULLWrr, X86::PMULLWrm },
{ X86::PMULUDQrr, X86::PMULUDQrm },
{ X86::PORrr, X86::PORrm },
return TM.getSubtarget<X86Subtarget>().GVRequiresExtraLoad(GV, TM, false);
}
-bool X86InstrInfo::isReallyTriviallyReMaterializable(MachineInstr *MI) const {
+bool
+X86InstrInfo::isReallyTriviallyReMaterializable(const MachineInstr *MI) const {
switch (MI->getOpcode()) {
default: break;
case X86::MOV8rm:
/// commuteInstruction - We have a few instructions that must be hacked on to
/// commute them.
///
-MachineInstr *X86InstrInfo::commuteInstruction(MachineInstr *MI) const {
+MachineInstr *
+X86InstrInfo::commuteInstruction(MachineInstr *MI, bool NewMI) const {
switch (MI->getOpcode()) {
case X86::SHRD16rri8: // A = SHRD16rri8 B, C, I -> A = SHLD16rri8 C, B, (16-I)
case X86::SHLD16rri8: // A = SHLD16rri8 B, C, I -> A = SHRD16rri8 C, B, (16-I)
// Fallthrough intended.
}
default:
- return TargetInstrInfoImpl::commuteInstruction(MI);
+ return TargetInstrInfoImpl::commuteInstruction(MI, NewMI);
}
}
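
The SHRD/SHLD rewrite noted in the case comments above follows directly from the x86 semantics of the double-precision shifts: for 16-bit operands, SHRD dst, src, I computes (dst >> I) | (src << (16 - I)) and SHLD dst, src, I computes (dst << I) | (src >> (16 - I)), so swapping the two register operands and replacing I with 16 - I produces the same result. The following standalone sanity check of that identity is not part of the patch; shrd16/shld16 are hypothetical helpers that merely model the instruction semantics.

#include <cassert>
#include <cstdint>

// Model of SHRD16rri8 B, C, I: shift B right by I, filling the vacated
// high bits from C.
static uint16_t shrd16(uint16_t b, uint16_t c, unsigned i) {
  return (uint16_t)((b >> i) | ((unsigned)c << (16 - i)));
}

// Model of SHLD16rri8 B, C, I: shift B left by I, filling the vacated
// low bits from C.
static uint16_t shld16(uint16_t b, uint16_t c, unsigned i) {
  return (uint16_t)(((unsigned)b << i) | (c >> (16 - i)));
}

int main() {
  // SHRD16 B, C, I == SHLD16 C, B, (16 - I): the operand swap and
  // immediate rewrite performed for the SHRD/SHLD cases above.
  for (unsigned i = 1; i < 16; ++i)
    assert(shrd16(0x1234, 0xabcd, i) == shld16(0xabcd, 0x1234, 16 - i));
  return 0;
}
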
// Emit the load instruction.
SDNode *Load = 0;
if (FoldedLoad) {
- MVT::ValueType VT = *RC->vt_begin();
+ MVT VT = *RC->vt_begin();
Load = DAG.getTargetNode(getLoadRegOpcode(RC, RI.getStackAlignment()), VT,
MVT::Other, &AddrOps[0], AddrOps.size());
NewNodes.push_back(Load);
}
// Emit the data processing instruction.
- std::vector<MVT::ValueType> VTs;
+ std::vector<MVT> VTs;
const TargetRegisterClass *DstRC = 0;
if (TID.getNumDefs() > 0) {
const TargetOperandInfo &DstTOI = TID.OpInfo[0];
VTs.push_back(*DstRC->vt_begin());
}
for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
- MVT::ValueType VT = N->getValueType(i);
+ MVT VT = N->getValueType(i);
if (VT != MVT::Other && i >= (unsigned)TID.getNumDefs())
VTs.push_back(VT);
}
unsigned X86InstrInfo::GetInstSizeInBytes(const MachineInstr *MI) const {
const TargetInstrDesc &Desc = MI->getDesc();
bool IsPIC = (TM.getRelocationModel() == Reloc::PIC_);
- bool Is64BitMode = ((X86Subtarget*)TM.getSubtargetImpl())->is64Bit();
+ bool Is64BitMode = TM.getSubtargetImpl()->is64Bit();
unsigned Size = GetInstSizeWithDesc(*MI, &Desc, IsPIC, Is64BitMode);
if (Desc.getOpcode() == X86::MOVPC32r) {
Size += GetInstSizeWithDesc(*MI, &get(X86::POP32r), IsPIC, Is64BitMode);