#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
+#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;
-STATISTIC(NumSpills, "Number of register spills");
-STATISTIC(NumPSpills,"Number of physical register spills");
-STATISTIC(NumReMats, "Number of re-materialization");
-STATISTIC(NumDRM , "Number of re-materializable defs elided");
-STATISTIC(NumStores, "Number of stores added");
-STATISTIC(NumLoads , "Number of loads added");
-STATISTIC(NumReused, "Number of values reused");
-STATISTIC(NumDSE , "Number of dead stores elided");
-STATISTIC(NumDCE , "Number of copies elided");
-STATISTIC(NumDSS , "Number of dead spill slots removed");
+STATISTIC(NumSpills , "Number of register spills");
+STATISTIC(NumPSpills , "Number of physical register spills");
+STATISTIC(NumReMats  , "Number of re-materializations");
+STATISTIC(NumDRM , "Number of re-materializable defs elided");
+STATISTIC(NumStores , "Number of stores added");
+STATISTIC(NumLoads , "Number of loads added");
+STATISTIC(NumReused , "Number of values reused");
+STATISTIC(NumDSE , "Number of dead stores elided");
+STATISTIC(NumDCE , "Number of copies elided");
+STATISTIC(NumDSS , "Number of dead spill slots removed");
+STATISTIC(NumCommutes, "Number of instructions commuted");
namespace {
enum SpillerName { simple, local };
SpillerOpt("spiller",
cl::desc("Spiller to use: (default: local)"),
cl::Prefix,
- cl::values(clEnumVal(simple, " simple spiller"),
- clEnumVal(local, " local spiller"),
+ cl::values(clEnumVal(simple, "simple spiller"),
+ clEnumVal(local, "local spiller"),
clEnumValEnd),
cl::init(local));
LowSpillSlot = SS;
if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
HighSpillSlot = SS;
- I->second = SS;
+ EmergencySpillSlots[RC] = SS;
return SS;
}
// instruction selection and is not a spill
if (FI >= LowSpillSlot) {
assert(FI >= 0 && "Spill slot index should not be negative!");
- assert(FI-LowSpillSlot < SpillSlotToUsesMap.size()
+ assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
&& "Invalid spill slot");
SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
}
void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI->getOperand(i);
- if (!MO.isFrameIndex())
+ if (!MO.isFI())
continue;
int FI = MO.getIndex();
if (MF.getFrameInfo()->isFixedObjectIndex(FI))
// is not a spill
if (FI < LowSpillSlot)
continue;
- assert(FI-LowSpillSlot < SpillSlotToUsesMap.size()
+ assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
&& "Invalid spill slot");
SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
}
DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
const TargetMachine &TM = MF.getTarget();
const TargetInstrInfo &TII = *TM.getInstrInfo();
+ const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
// LoadedRegs - Keep track of which vregs are loaded, so that we only load
MachineInstr &MI = *MII;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (MO.isRegister() && MO.getReg()) {
+ if (MO.isReg() && MO.getReg()) {
if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
unsigned VirtReg = MO.getReg();
+ unsigned SubIdx = MO.getSubReg();
unsigned PhysReg = VRM.getPhys(VirtReg);
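+ // If the operand carries a sub-register index, it must be rewritten to
+ // that sub-register of the assigned physical register.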
+ unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
if (!VRM.isAssignedReg(VirtReg)) {
int StackSlot = VRM.getStackSlot(VirtReg);
const TargetRegisterClass* RC =
++NumStores;
}
}
- MF.getRegInfo().setPhysRegUsed(PhysReg);
- MI.getOperand(i).setReg(PhysReg);
+ MF.getRegInfo().setPhysRegUsed(RReg);
+ MI.getOperand(i).setReg(RReg);
} else {
MF.getRegInfo().setPhysRegUsed(MO.getReg());
}
AvailableSpills &Spills, BitVector &RegKills,
std::vector<MachineOperand*> &KillOps,
VirtRegMap &VRM);
+ bool CommuteToFoldReload(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MII,
+ unsigned VirtReg, unsigned SrcReg, int SS,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps,
+ const TargetRegisterInfo *TRI,
+ VirtRegMap &VRM);
void SpillRegToStackSlot(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MII,
int Idx, unsigned PhysReg, int StackSlot,
SmallVector<unsigned, 2> *KillRegs = NULL) {
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (!MO.isRegister() || !MO.isUse() || !MO.isKill())
+ if (!MO.isReg() || !MO.isUse() || !MO.isKill())
continue;
unsigned Reg = MO.getReg();
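+ // RegKills and KillOps are indexed by physical register number; skip any
+ // virtual registers that still appear here.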
+ if (TargetRegisterInfo::isVirtualRegister(Reg))
+ continue;
if (KillRegs)
KillRegs->push_back(Reg);
+ assert(Reg < KillOps.size());
if (KillOps[Reg] == &MO) {
RegKills.reset(Reg);
KillOps[Reg] = NULL;
MachineOperand *DefOp = NULL;
for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = DefMI->getOperand(i);
- if (MO.isRegister() && MO.isDef()) {
+ if (MO.isReg() && MO.isDef()) {
if (MO.getReg() == Reg)
DefOp = &MO;
else if (!MO.isDead())
return false;
bool FoundUse = false, Done = false;
- MachineBasicBlock::iterator E = NewDef;
+ MachineBasicBlock::iterator E = &NewDef;
++I; ++E;
for (; !Done && I != E; ++I) {
MachineInstr *NMI = I;
for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
MachineOperand &MO = NMI->getOperand(j);
- if (!MO.isRegister() || MO.getReg() != Reg)
+ if (!MO.isReg() || MO.getReg() != Reg)
continue;
if (MO.isUse())
FoundUse = true;
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps) {
+ std::vector<MachineOperand*> &KillOps,
+ const TargetRegisterInfo* TRI) {
const TargetInstrDesc &TID = MI.getDesc();
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (!MO.isRegister() || !MO.isUse())
+ if (!MO.isReg() || !MO.isUse())
continue;
unsigned Reg = MO.getReg();
if (Reg == 0)
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
const MachineOperand &MO = MI.getOperand(i);
- if (!MO.isRegister() || !MO.isDef())
+ if (!MO.isReg() || !MO.isDef())
continue;
unsigned Reg = MO.getReg();
RegKills.reset(Reg);
KillOps[Reg] = NULL;
+ // It also defines (or partially defines) aliases.
+ for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
+ RegKills.reset(*AS);
+ KillOps[*AS] = NULL;
+ }
}
}
MachineInstr *NewMI = prior(MII);
for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = NewMI->getOperand(i);
- if (!MO.isRegister() || MO.getReg() == 0)
+ if (!MO.isReg() || MO.getReg() == 0)
continue;
unsigned VirtReg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
}
Spills.ClobberPhysReg(NewPhysReg);
Spills.ClobberPhysReg(NewOp.PhysRegReused);
-
- MI->getOperand(NewOp.Operand).setReg(NewPhysReg);
+
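+ // Rewrite the reused operand, preserving any sub-register index it carries.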
+ unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
+ unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
+ MI->getOperand(NewOp.Operand).setReg(RReg);
Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);
--MII;
- UpdateKills(*MII, RegKills, KillOps);
+ UpdateKills(*MII, RegKills, KillOps, TRI);
DOUT << '\t' << *MII;
DOUT << "Reuse undone!\n";
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
- MachineBasicBlock::iterator &MII,
+ MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
- AvailableSpills &Spills,
- BitVector &RegKills,
- std::vector<MachineOperand*> &KillOps,
- VirtRegMap &VRM) {
+ AvailableSpills &Spills,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps,
+ VirtRegMap &VRM) {
MachineFunction &MF = *MBB.getParent();
MachineInstr &MI = *MII;
unsigned UnfoldedOpc = 0;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse())
+ if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
continue;
unsigned VirtReg = MO.getReg();
if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
return false;
continue;
}
- PhysReg = VRM.getPhys(VirtReg);
- if (!TRI->regsOverlap(PhysReg, UnfoldPR))
- continue;
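+ // The virtual register may not have been assigned a physical register
+ // (it may itself be spilled); only check for an overlap with UnfoldPR
+ // when an assignment exists.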
+ if (VRM.hasPhys(VirtReg)) {
+ PhysReg = VRM.getPhys(VirtReg);
+ if (!TRI->regsOverlap(PhysReg, UnfoldPR))
+ continue;
+ }
// Ok, we'll need to reload the value into a register which makes
// it impossible to perform the store unfolding optimization later.
InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
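+ // NewMI is no longer needed; it is owned by the MachineFunction, so
+ // release it with DeleteMachineInstr rather than plain delete.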
+ MF.DeleteMachineInstr(NewMI);
return true;
}
- delete NewMI;
+ MF.DeleteMachineInstr(NewMI);
}
}
return false;
}
+/// CommuteToFoldReload -
+/// Look for
+/// r1 = load fi#1
+/// r1 = op r1, r2<kill>
+/// store r1, fi#1
+///
+/// If op is commutable and r2 is killed, then we can xform these to
+/// r2 = op r2, fi#1
+/// store r2, fi#1
+bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MII,
+ unsigned VirtReg, unsigned SrcReg, int SS,
+ BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps,
+ const TargetRegisterInfo *TRI,
+ VirtRegMap &VRM) {
+ if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
+ return false;
+
+ MachineFunction &MF = *MBB.getParent();
+ MachineInstr &MI = *MII;
+ MachineBasicBlock::iterator DefMII = prior(MII);
+ MachineInstr *DefMI = DefMII;
+ const TargetInstrDesc &TID = DefMI->getDesc();
+ unsigned NewDstIdx;
+ if (DefMII != MBB.begin() &&
+ TID.isCommutable() &&
+ TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
+ MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
+ unsigned NewReg = NewDstMO.getReg();
+ if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
+ return false;
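+ // The instruction just before the def must be the reload of SrcReg from
+ // the same stack slot.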
+ MachineInstr *ReloadMI = prior(DefMII);
+ int FrameIdx;
+ unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
+ if (DestReg != SrcReg || FrameIdx != SS)
+ return false;
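+ // The def must be in two-address form: find the use of the reloaded
+ // register and make sure it is tied to a def of SrcReg.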
+ int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
+ if (UseIdx == -1)
+ return false;
+ int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO);
+ if (DefIdx == -1)
+ return false;
+ assert(DefMI->getOperand(DefIdx).isReg() &&
+ DefMI->getOperand(DefIdx).getReg() == SrcReg);
+
+ // Now commute def instruction.
+ MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
+ if (!CommutedMI)
+ return false;
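+ // Fold the stack slot into the commuted instruction at the operand that
+ // has become the destination.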
+ SmallVector<unsigned, 2> Ops;
+ Ops.push_back(NewDstIdx);
+ MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
+ // CommutedMI is no longer needed; foldMemoryOperand returned a new MI.
+ MF.DeleteMachineInstr(CommutedMI);
+ if (!FoldedMI)
+ return false;
+
+ VRM.addSpillSlotUse(SS, FoldedMI);
+ VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
+ // Insert new def MI and spill MI.
+ const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
+ TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
+ MII = prior(MII);
+ MachineInstr *StoreMI = MII;
+ VRM.addSpillSlotUse(SS, StoreMI);
+ VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
+ MII = MBB.insert(MII, FoldedMI); // Update MII to backtrack.
+
+ // Delete all 3 old instructions.
+ InvalidateKills(*ReloadMI, RegKills, KillOps);
+ VRM.RemoveMachineInstrFromMaps(ReloadMI);
+ MBB.erase(ReloadMI);
+ InvalidateKills(*DefMI, RegKills, KillOps);
+ VRM.RemoveMachineInstrFromMaps(DefMI);
+ MBB.erase(DefMI);
+ InvalidateKills(MI, RegKills, KillOps);
+ VRM.RemoveMachineInstrFromMaps(&MI);
+ MBB.erase(&MI);
+
+ ++NumCommutes;
+ return true;
+ }
+
+ return false;
+}
+
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
MachineOperand *LastUD = NULL;
for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
MachineOperand &MO = LastUDMI->getOperand(i);
- if (!MO.isRegister() || MO.getReg() != Reg)
+ if (!MO.isReg() || MO.getReg() != Reg)
continue;
if (!LastUD || (LastUD->isUse() && MO.isDef()))
LastUD = &MO;
}
// This invalidates Phys.
Spills.ClobberPhysReg(Phys);
- UpdateKills(*prior(MII), RegKills, KillOps);
+ UpdateKills(*prior(MII), RegKills, KillOps, TRI);
DOUT << '\t' << *prior(MII);
}
}
SmallVector<unsigned, 4> VirtUseOps;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (!MO.isRegister() || MO.getReg() == 0)
+ if (!MO.isReg() || MO.getReg() == 0)
continue; // Ignore non-register operands.
unsigned VirtReg = MO.getReg();
}
// Process all of the spilled uses and all non spilled reg references.
+ SmallVector<int, 2> PotentialDeadStoreSlots;
for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
unsigned i = VirtUseOps[j];
MachineOperand &MO = MI.getOperand(i);
unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
MI.getOperand(i).setReg(RReg);
if (VRM.isImplicitlyDefined(VirtReg))
- BuildMI(MBB, MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
+ BuildMI(MBB, &MI, MI.getDebugLoc(),
+ TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
continue;
}
bool CanReuse = true;
int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
if (ti != -1 &&
- MI.getOperand(ti).isRegister() &&
+ MI.getOperand(ti).isReg() &&
MI.getOperand(ti).getReg() == VirtReg) {
// Okay, we have a two address operand. We can reuse this physreg as
// long as we are allowed to clobber the value and there isn't an
if (MI.getOperand(i).isKill() &&
ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
- // This was the last use and the spilled value is still available
- // for reuse. That means the spill was unnecessary!
- MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot];
- if (DeadStore) {
- DOUT << "Removed dead store:\t" << *DeadStore;
- InvalidateKills(*DeadStore, RegKills, KillOps);
- VRM.RemoveMachineInstrFromMaps(DeadStore);
- MBB.erase(DeadStore);
- MaybeDeadStores[ReuseSlot] = NULL;
- ++NumDSE;
- }
+
+ // The store of this spilled value is potentially dead, but we
+ // won't know for certain until we've confirmed that the re-use
+ // above is valid, which means waiting until the other operands
+ // are processed. For now we just track the spill slot; we'll
+ // remove it after the other operands are processed, if the reuse
+ // proves valid.
+
+ PotentialDeadStoreSlots.push_back(ReuseSlot);
}
continue;
} // CanReuse
TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);
MachineInstr *CopyMI = prior(MII);
- UpdateKills(*CopyMI, RegKills, KillOps);
+ UpdateKills(*CopyMI, RegKills, KillOps, TRI);
// This invalidates DesignatedReg.
Spills.ClobberPhysReg(DesignatedReg);
MI.getOperand(i).setIsKill();
unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
- UpdateKills(*prior(MII), RegKills, KillOps);
+ UpdateKills(*prior(MII), RegKills, KillOps, TRI);
DOUT << '\t' << *prior(MII);
}
+ // Ok - now we can remove stores that have been confirmed dead.
+ for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
+ // This was the last use and the spilled value is still available
+ // for reuse. That means the spill was unnecessary!
+ int PDSSlot = PotentialDeadStoreSlots[j];
+ MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
+ if (DeadStore) {
+ DOUT << "Removed dead store:\t" << *DeadStore;
+ InvalidateKills(*DeadStore, RegKills, KillOps);
+ VRM.RemoveMachineInstrFromMaps(DeadStore);
+ MBB.erase(DeadStore);
+ MaybeDeadStores[PDSSlot] = NULL;
+ ++NumDSE;
+ }
+ }
+
DOUT << '\t' << MI;
if (DestReg != InReg) {
const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
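+ // Capture the sub-register index from MI's def of DestReg so it can be
+ // propagated to the newly inserted copy below.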
+ MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
+ unsigned SubIdx = DefMO->getSubReg();
// Revisit the copy so we make sure to notice the effects of the
// operation on the destreg (either needing to RA it if it's
// virtual or needing to clobber any values if it's physical).
NextMII = &MI;
--NextMII; // backtrack to the copy.
+ // Propagate the sub-register index over.
+ if (SubIdx) {
+ DefMO = NextMII->findRegisterDefOperand(DestReg);
+ DefMO->setSubReg(SubIdx);
+ }
BackTracked = true;
} else {
DOUT << "Removing now-noop copy: " << MI;
// super-register is needed below.
if (KillOpnd && !KillOpnd->getSubReg() &&
TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
- MBB.insert(MII, NewMIs[0]);
+ MBB.insert(MII, NewMIs[0]);
NewStore = NewMIs[1];
MBB.insert(MII, NewStore);
VRM.addSpillSlotUse(SS, NewStore);
if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
"Src hasn't been allocated yet?");
+
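+ // If the spilled value was produced by a commutable two-address op whose
+ // other operand is killed, commuting lets the reload and this store be
+ // folded away.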
+ if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
+ RegKills, KillOps, TRI, VRM)) {
+ NextMII = next(MII);
+ BackTracked = true;
+ goto ProcessNextInst;
+ }
+
// Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
// this as a potentially dead store in case there is a subsequent
// store into the stack slot without a read from it.
MaybeDeadStores[StackSlot] = &MI;
// If the stack slot value was previously available in some other
- // register, change it now. Otherwise, make the register available,
- // in PhysReg.
- Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/);
+ // register, change it now. Otherwise, make the register
+ // available in PhysReg.
+ Spills.addAvailable(StackSlot, &MI, SrcReg, false/*!clobber*/);
}
}
}
// Process all of the spilled defs.
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (!(MO.isRegister() && MO.getReg() && MO.isDef()))
+ if (!(MO.isReg() && MO.getReg() && MO.isDef()))
continue;
unsigned VirtReg = MO.getReg();
if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
// Check to see if this is a noop copy. If so, eliminate the
// instruction before considering the dest reg to be changed.
- unsigned Src, Dst;
- if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
+ unsigned Src, Dst, SrcSR, DstSR;
+ if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
++NumDCE;
DOUT << "Removing now-noop copy: " << MI;
SmallVector<unsigned, 2> KillRegs;
InvalidateKills(MI, RegKills, KillOps, &KillRegs);
if (MO.isDead() && !KillRegs.empty()) {
- assert(KillRegs[0] == Dst);
+ // Source register or an implicit super/sub-register use is killed.
+ assert(KillRegs[0] == Dst ||
+ TRI->isSubRegister(KillRegs[0], Dst) ||
+ TRI->isSuperRegister(KillRegs[0], Dst));
// Last def is now dead.
TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
}
// Check to see if this is a noop copy. If so, eliminate the
// instruction before considering the dest reg to be changed.
{
- unsigned Src, Dst;
- if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
+ unsigned Src, Dst, SrcSR, DstSR;
+ if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
++NumDCE;
DOUT << "Removing now-noop copy: " << MI;
InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
- UpdateKills(*LastStore, RegKills, KillOps);
+ UpdateKills(*LastStore, RegKills, KillOps, TRI);
goto ProcessNextInst;
}
}
ProcessNextInst:
DistanceMap.insert(std::make_pair(&MI, Dist++));
if (!Erased && !BackTracked) {
- for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
- UpdateKills(*II, RegKills, KillOps);
+ for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
+ UpdateKills(*II, RegKills, KillOps, TRI);
}
MII = NextMII;
}