#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
using namespace llvm;
STATISTIC(NumSpills, "Number of register spills");
+STATISTIC(NumPSpills, "Number of physical register spills");
STATISTIC(NumReMats, "Number of re-materializations");
STATISTIC(NumDRM , "Number of re-materializable defs elided");
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumReused, "Number of values reused");
STATISTIC(NumDSE , "Number of dead stores elided");
STATISTIC(NumDCE , "Number of copies elided");
+STATISTIC(NumDSS , "Number of dead spill slots removed");
namespace {
enum SpillerName { simple, local };
-
- static cl::opt<SpillerName>
- SpillerOpt("spiller",
- cl::desc("Spiller to use: (default: local)"),
- cl::Prefix,
- cl::values(clEnumVal(simple, " simple spiller"),
- clEnumVal(local, " local spiller"),
- clEnumValEnd),
- cl::init(local));
}
+static cl::opt<SpillerName>
+SpillerOpt("spiller",
+ cl::desc("Spiller to use: (default: local)"),
+ cl::Prefix,
+ cl::values(clEnumVal(simple, " simple spiller"),
+ clEnumVal(local, " local spiller"),
+ clEnumValEnd),
+ cl::init(local));
+
//===----------------------------------------------------------------------===//
// VirtRegMap implementation
//===----------------------------------------------------------------------===//
: TII(*mf.getTarget().getInstrInfo()), MF(mf),
Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
- Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1) {
+ Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
+ LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
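+  // Seed the spill slot use map with a small initial capacity; it is grown
+  // on demand as spill slots are created.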
+ SpillSlotToUsesMap.resize(8);
+ ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
+ TargetRegisterInfo::FirstVirtualRegister);
grow();
}
Virt2SplitMap.grow(LastVirtReg);
Virt2SplitKillMap.grow(LastVirtReg);
ReMatMap.grow(LastVirtReg);
+ ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
}
int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
"attempt to assign stack slot to already spilled register");
const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
- int frameIndex = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
- RC->getAlignment());
- Virt2StackSlotMap[virtReg] = frameIndex;
+ int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
+ RC->getAlignment());
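+  // Remember the range of spill slot indices so unused slots can be
+  // reclaimed later and slot uses can be indexed from LowSpillSlot.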
+ if (LowSpillSlot == NO_STACK_SLOT)
+ LowSpillSlot = SS;
+ if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
+ HighSpillSlot = SS;
+ unsigned Idx = SS-LowSpillSlot;
+ while (Idx >= SpillSlotToUsesMap.size())
+ SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
+ Virt2StackSlotMap[virtReg] = SS;
++NumSpills;
- return frameIndex;
+ return SS;
}
-void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) {
+void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
assert(TargetRegisterInfo::isVirtualRegister(virtReg));
assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
"attempt to assign stack slot to already spilled register");
- assert((frameIndex >= 0 ||
- (frameIndex >= MF.getFrameInfo()->getObjectIndexBegin())) &&
+ assert((SS >= 0 ||
+ (SS >= MF.getFrameInfo()->getObjectIndexBegin())) &&
"illegal fixed frame index");
- Virt2StackSlotMap[virtReg] = frameIndex;
+ Virt2StackSlotMap[virtReg] = SS;
}
int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
Virt2ReMatIdMap[virtReg] = id;
}
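+/// getEmergencySpillSlot - Return the spill slot shared by all emergency
+/// spills of registers in the given class, creating it if necessary.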
+int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
+ std::map<const TargetRegisterClass*, int>::iterator I =
+ EmergencySpillSlots.find(RC);
+ if (I != EmergencySpillSlots.end())
+ return I->second;
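+  // No emergency spill slot for this register class yet; create one.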
+ int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
+ RC->getAlignment());
+ if (LowSpillSlot == NO_STACK_SLOT)
+ LowSpillSlot = SS;
+ if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
+ HighSpillSlot = SS;
+  // Grow the use map to cover the new slot and remember it for this class.
+  unsigned Idx = SS-LowSpillSlot;
+  while (Idx >= SpillSlotToUsesMap.size())
+    SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
+  EmergencySpillSlots[RC] = SS;
+ return SS;
+}
+
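+/// addSpillSlotUse - Record that MI references the given spill slot; slots
+/// with no recorded uses are removed after rewriting.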
+void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
+ if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) {
+    // If FI < LowSpillSlot, this stack reference was produced by
+    // instruction selection and is not a spill.
+ if (FI >= LowSpillSlot) {
+ assert(FI >= 0 && "Spill slot index should not be negative!");
+ assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
+ && "Invalid spill slot");
+ SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
+ }
+ }
+}
+
void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
MachineInstr *NewMI, ModRef MRInfo) {
// Move previous memory references folded to new instruction.
MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
}
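+/// RemoveMachineInstrFromMaps - MI is being erased; remove it from all of
+/// the side tables, including any spill slot use sets it appears in.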
+void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
+ for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = MI->getOperand(i);
+ if (!MO.isFrameIndex())
+ continue;
+ int FI = MO.getIndex();
+ if (MF.getFrameInfo()->isFixedObjectIndex(FI))
+ continue;
+    // This stack reference was produced by instruction selection and
+    // is not a spill.
+ if (FI < LowSpillSlot)
+ continue;
+ assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
+ && "Invalid spill slot");
+ SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
+ }
+ MI2VirtMap.erase(MI);
+ SpillPt2VirtMap.erase(MI);
+ RestorePt2VirtMap.erase(MI);
+ EmergencySpillMap.erase(MI);
+}
+
void VirtRegMap::print(std::ostream &OS) const {
const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();
for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
- OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i]) << "]\n";
-
+ OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i])
+ << "]\n";
}
for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
}
void VirtRegMap::dump() const {
- print(DOUT);
+ print(cerr);
}
MachineInstr &MI = *MII;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
- if (MO.isRegister() && MO.getReg())
+ if (MO.isRegister() && MO.getReg()) {
if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
unsigned VirtReg = MO.getReg();
unsigned PhysReg = VRM.getPhys(VirtReg);
std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
== LoadedRegs.end()) {
TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
+ MachineInstr *LoadMI = prior(MII);
+ VRM.addSpillSlotUse(StackSlot, LoadMI);
LoadedRegs.push_back(VirtReg);
++NumLoads;
- DOUT << '\t' << *prior(MII);
+ DOUT << '\t' << *LoadMI;
}
if (MO.isDef()) {
TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
StackSlot, RC);
+ MachineInstr *StoreMI = next(MII);
+ VRM.addSpillSlotUse(StackSlot, StoreMI);
++NumStores;
}
}
} else {
MF.getRegInfo().setPhysRegUsed(MO.getReg());
}
+ }
}
DOUT << '\t' << MI;
MachineRegisterInfo *RegInfo;
const TargetRegisterInfo *TRI;
const TargetInstrInfo *TII;
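+  // DistanceMap - Map each instruction in the current MBB to its distance
+  // from the start of the block; used by TransferDeadness.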
+ DenseMap<MachineInstr*, unsigned> DistanceMap;
public:
bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
RegInfo = &MF.getRegInfo();
MBB != E; ++MBB)
RewriteMBB(*MBB, VRM);
+ // Mark unused spill slots.
+ MachineFrameInfo *MFI = MF.getFrameInfo();
+ int SS = VRM.getLowSpillSlot();
+ if (SS != VirtRegMap::NO_STACK_SLOT)
+ for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
+ if (!VRM.isSpillSlotUsed(SS)) {
+ MFI->RemoveStackObject(SS);
+ ++NumDSS;
+ }
+
DOUT << "**** Post Machine Instrs ****\n";
DEBUG(MF.dump());
return true;
}
private:
+ void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
+ unsigned Reg, BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps);
bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
MachineBasicBlock::iterator &MII,
std::vector<MachineInstr*> &MaybeDeadStores,
if (Reg == 0)
continue;
- if (RegKills[Reg]) {
+ if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
// That can't be right. Register is killed but not re-defined and it's
// being reused. Let's fix that.
KillOps[Reg]->setIsKill(false);
}
}
+/// ReMaterialize - Re-materialize the definition for Reg targeting DestReg.
+///
+static void ReMaterialize(MachineBasicBlock &MBB,
+ MachineBasicBlock::iterator &MII,
+ unsigned DestReg, unsigned Reg,
+ const TargetInstrInfo *TII,
+ const TargetRegisterInfo *TRI,
+ VirtRegMap &VRM) {
+ TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
+ MachineInstr *NewMI = prior(MII);
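+  // Rewrite any virtual register uses on the re-materialized instruction to
+  // the physical registers they have been assigned.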
+ for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = NewMI->getOperand(i);
+ if (!MO.isRegister() || MO.getReg() == 0)
+ continue;
+ unsigned VirtReg = MO.getReg();
+ if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
+ continue;
+ assert(MO.isUse());
+ unsigned SubIdx = MO.getSubReg();
+ unsigned Phys = VRM.getPhys(VirtReg);
+ assert(Phys);
+ unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
+ MO.setReg(RReg);
+ }
+ ++NumReMats;
+}
+
// ReusedOp - For each reused operand, we keep track of a bit of information, in
// case we need to rollback upon processing a new operand. See comments below.
MI, Spills, MaybeDeadStores,
Rejected, RegKills, KillOps, VRM);
+ MachineBasicBlock::iterator MII = MI;
if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
- TRI->reMaterialize(*MBB, MI, NewPhysReg,
- VRM.getReMaterializedMI(NewOp.VirtReg));
- ++NumReMats;
+ ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM);
} else {
- TII->loadRegFromStackSlot(*MBB, MI, NewPhysReg,
+ TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
NewOp.StackSlotOrReMat, AliasRC);
+ MachineInstr *LoadMI = prior(MII);
+ VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
// Any stores to this stack slot are not dead anymore.
MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
++NumLoads;
MI->getOperand(NewOp.Operand).setReg(NewPhysReg);
Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);
- MachineBasicBlock::iterator MII = MI;
--MII;
UpdateKills(*MII, RegKills, KillOps);
DOUT << '\t' << *MII;
/// xorl %edi, %eax
/// movl %eax, -32(%ebp)
/// movl -36(%ebp), %eax
-/// orl %eax, -32(%ebp)
+/// orl %eax, -32(%ebp)
/// ==>
/// xorl %edi, %eax
/// orl -36(%ebp), %eax
unsigned UnfoldVR = 0;
int FoldedSS = VirtRegMap::NO_STACK_SLOT;
VirtRegMap::MI2VirtMapTy::const_iterator I, End;
- for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
+ for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
// Only transform a MI that folds a single register.
if (UnfoldedOpc)
return false;
UnfoldVR = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
+    // MI2VirtMap can be updated, which invalidates the iterator.
+    // Increment the iterator first.
+ ++I;
if (VRM.isAssignedReg(UnfoldVR))
continue;
// If this reference is not a use, any previous store is now dead.
MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
if (DeadStore && (MR & VirtRegMap::isModRef)) {
unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
- if (!PhysReg ||
- DeadStore->findRegisterUseOperandIdx(PhysReg, true) == -1)
+ if (!PhysReg || !DeadStore->readsRegister(PhysReg))
continue;
UnfoldPR = PhysReg;
UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
assert(NewMIs.size() == 1);
MachineInstr *NewMI = NewMIs.back();
NewMIs.clear();
- int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
+ int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
assert(Idx != -1);
SmallVector<unsigned, 2> Ops;
Ops.push_back(Idx);
MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
if (FoldedMI) {
+ VRM.addSpillSlotUse(SS, FoldedMI);
if (!VRM.hasPhys(UnfoldVR))
VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
MII = MBB.insert(MII, FoldedMI);
+ InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
return true;
std::vector<MachineOperand*> &KillOps,
VirtRegMap &VRM) {
TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
- DOUT << "Store:\t" << *next(MII);
+ MachineInstr *StoreMI = next(MII);
+ VRM.addSpillSlotUse(StackSlot, StoreMI);
+ DOUT << "Store:\t" << *StoreMI;
// If there is a dead store to this stack slot, nuke it now.
if (LastStore) {
bool CheckDef = PrevMII != MBB.begin();
if (CheckDef)
--PrevMII;
- MBB.erase(LastStore);
VRM.RemoveMachineInstrFromMaps(LastStore);
+ MBB.erase(LastStore);
if (CheckDef) {
// Look at defs of killed registers on the store. Mark the defs
// as dead since the store has been deleted and they aren't
if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
// FIXME: This assumes a remat def does not have side
// effects.
- MBB.erase(DeadDef);
VRM.RemoveMachineInstrFromMaps(DeadDef);
+ MBB.erase(DeadDef);
++NumDRM;
}
}
++NumStores;
}
+/// TransferDeadness - An identity copy definition is dead and is being
+/// removed. Find the last def or use and mark it as dead / kill.
+void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
+ unsigned Reg, BitVector &RegKills,
+ std::vector<MachineOperand*> &KillOps) {
+ int LastUDDist = -1;
+ MachineInstr *LastUDMI = NULL;
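+  // Scan all uses and defs of Reg in this block; remember the last one at
+  // or before CurDist.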
+ for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
+ RE = RegInfo->reg_end(); RI != RE; ++RI) {
+ MachineInstr *UDMI = &*RI;
+ if (UDMI->getParent() != MBB)
+ continue;
+ DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
+ if (DI == DistanceMap.end() || DI->second > CurDist)
+ continue;
+ if ((int)DI->second < LastUDDist)
+ continue;
+ LastUDDist = DI->second;
+ LastUDMI = UDMI;
+ }
+
+ if (LastUDMI) {
+ const TargetInstrDesc &TID = LastUDMI->getDesc();
+ MachineOperand *LastUD = NULL;
+ for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
+ MachineOperand &MO = LastUDMI->getOperand(i);
+ if (!MO.isRegister() || MO.getReg() != Reg)
+ continue;
+ if (!LastUD || (LastUD->isUse() && MO.isDef()))
+ LastUD = &MO;
+ if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
+ return;
+ }
+ if (LastUD->isDef())
+ LastUD->setIsDead();
+ else {
+ LastUD->setIsKill();
+ RegKills.set(Reg);
+ KillOps[Reg] = LastUD;
+ }
+ }
+}
+
/// RewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them. If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
std::vector<MachineOperand*> KillOps;
KillOps.resize(TRI->getNumRegs(), NULL);
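+  // Number the instructions in this block so TransferDeadness can find the
+  // last use or def of a register.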
+ unsigned Dist = 0;
+ DistanceMap.clear();
for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
MII != E; ) {
MachineBasicBlock::iterator NextMII = MII; ++NextMII;
MachineInstr &MI = *MII;
const TargetInstrDesc &TID = MI.getDesc();
+ if (VRM.hasEmergencySpills(&MI)) {
+ // Spill physical register(s) in the rare case the allocator has run out
+ // of registers to allocate.
+ SmallSet<int, 4> UsedSS;
+ std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
+ for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
+ unsigned PhysReg = EmSpills[i];
+ const TargetRegisterClass *RC =
+ TRI->getPhysicalRegisterRegClass(PhysReg);
+ assert(RC && "Unable to determine register class!");
+ int SS = VRM.getEmergencySpillSlot(RC);
+ if (UsedSS.count(SS))
+          assert(0 && "Need to spill more than one physical register!");
+ UsedSS.insert(SS);
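+        // Spill the physical register around MI: store it to the slot here
+        // and reload it immediately after MI.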
+ TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
+ MachineInstr *StoreMI = prior(MII);
+ VRM.addSpillSlotUse(SS, StoreMI);
+ TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
+ MachineInstr *LoadMI = next(MII);
+ VRM.addSpillSlotUse(SS, LoadMI);
+ ++NumPSpills;
+ }
+ NextMII = next(MII);
+ }
+
// Insert restores here if asked to.
if (VRM.isRestorePt(&MI)) {
std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
- unsigned VirtReg = RestoreRegs[i];
+ unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
if (!VRM.getPreSplitReg(VirtReg))
continue; // Split interval spilled again.
unsigned Phys = VRM.getPhys(VirtReg);
RegInfo->setPhysRegUsed(Phys);
if (VRM.isReMaterialized(VirtReg)) {
- TRI->reMaterialize(MBB, &MI, Phys,
- VRM.getReMaterializedMI(VirtReg));
- ++NumReMats;
+ ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
} else {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
- TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
- RC);
+ int SS = VRM.getStackSlot(VirtReg);
+ TII->loadRegFromStackSlot(MBB, &MI, Phys, SS, RC);
+ MachineInstr *LoadMI = prior(MII);
+ VRM.addSpillSlotUse(SS, LoadMI);
++NumLoads;
}
// This invalidates Phys.
int StackSlot = VRM.getStackSlot(VirtReg);
TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
MachineInstr *StoreMI = next(MII);
- DOUT << "Store:\t" << StoreMI;
+ VRM.addSpillSlotUse(StackSlot, StoreMI);
+ DOUT << "Store:\t" << *StoreMI;
VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
}
NextMII = next(MII);
/// ReusedOperands - Keep track of operand reuse in case we need to undo
/// reuse.
ReuseInfo ReusedOperands(MI, TRI);
- // Process all of the spilled uses and all non spilled reg references.
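+    // First collect the operand indices of all virtual register references,
+    // implicit uses first; they are rewritten in that order below.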
+ SmallVector<unsigned, 4> VirtUseOps;
for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
MachineOperand &MO = MI.getOperand(i);
if (!MO.isRegister() || MO.getReg() == 0)
RegInfo->setPhysRegUsed(VirtReg);
continue;
}
-
+
+ // We want to process implicit virtual register uses first.
+ if (MO.isImplicit())
+        // If the virtual register is implicitly defined, emit an
+        // implicit_def before it so the scavenger knows it's "defined".
+ VirtUseOps.insert(VirtUseOps.begin(), i);
+ else
+ VirtUseOps.push_back(i);
+ }
+
+ // Process all of the spilled uses and all non spilled reg references.
+ for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
+ unsigned i = VirtUseOps[j];
+ MachineOperand &MO = MI.getOperand(i);
+ unsigned VirtReg = MO.getReg();
assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
- "Not a virtual or a physical register?");
+ "Not a virtual register?");
unsigned SubIdx = MO.getSubReg();
if (VRM.isAssignedReg(VirtReg)) {
ReusedOperands.markClobbered(Phys);
unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
MI.getOperand(i).setReg(RReg);
+ if (VRM.isImplicitlyDefined(VirtReg))
+ BuildMI(MBB, MI, TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
continue;
}
DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
else
DOUT << "Reusing SS#" << ReuseSlot;
- DOUT << " from physreg " << TRI->getName(PhysReg) << " for vreg"
- << VirtReg
+ DOUT << " from physreg " << TRI->getName(PhysReg)
+ << " for vreg" << VirtReg
<< " instead of reloading into same physreg.\n";
unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
MI.getOperand(i).setReg(RReg);
RegInfo->setPhysRegUsed(PhysReg);
ReusedOperands.markClobbered(PhysReg);
if (DoReMat) {
- TRI->reMaterialize(MBB, &MI, PhysReg, VRM.getReMaterializedMI(VirtReg));
- ++NumReMats;
+ ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
} else {
const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
+ MachineInstr *LoadMI = prior(MII);
+ VRM.addSpillSlotUse(SSorRMId, LoadMI);
++NumLoads;
}
// This invalidates PhysReg.
// physical registers that may contain the value of the spilled virtual
// register
SmallSet<int, 2> FoldedSS;
- for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
+ for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
unsigned VirtReg = I->second.first;
VirtRegMap::ModRef MR = I->second.second;
DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
+      // MI2VirtMap can be updated, which invalidates the iterator.
+      // Increment the iterator first.
+ ++I;
int SS = VRM.getStackSlot(VirtReg);
if (SS == VirtRegMap::NO_STACK_SLOT)
continue;
InvalidateKill(InReg, RegKills, KillOps);
}
+ InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
if (PhysReg &&
TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
MBB.insert(MII, NewMIs[0]);
+ InvalidateKills(MI, RegKills, KillOps);
VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
// the value and there isn't an earlier def that has already clobbered
// the physreg.
if (PhysReg &&
- !TII->isStoreToStackSlot(&MI, SS) && // Not profitable!
- DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
- TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
- MBB.insert(MII, NewMIs[0]);
- NewStore = NewMIs[1];
- MBB.insert(MII, NewStore);
- VRM.RemoveMachineInstrFromMaps(&MI);
- MBB.erase(&MI);
- Erased = true;
- --NextMII;
- --NextMII; // backtrack to the unfolded instruction.
- BackTracked = true;
- isDead = true;
+ !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
+ MachineOperand *KillOpnd =
+ DeadStore->findRegisterUseOperand(PhysReg, true);
+ // Note, if the store is storing a sub-register, it's possible the
+ // super-register is needed below.
+ if (KillOpnd && !KillOpnd->getSubReg() &&
+ TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
+ MBB.insert(MII, NewMIs[0]);
+ NewStore = NewMIs[1];
+ MBB.insert(MII, NewStore);
+ VRM.addSpillSlotUse(SS, NewStore);
+ InvalidateKills(MI, RegKills, KillOps);
+ VRM.RemoveMachineInstrFromMaps(&MI);
+ MBB.erase(&MI);
+ Erased = true;
+ --NextMII;
+ --NextMII; // backtrack to the unfolded instruction.
+ BackTracked = true;
+ isDead = true;
+ }
}
}
if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
++NumDCE;
DOUT << "Removing now-noop copy: " << MI;
+ SmallVector<unsigned, 2> KillRegs;
+ InvalidateKills(MI, RegKills, KillOps, &KillRegs);
+ if (MO.isDead() && !KillRegs.empty()) {
+ assert(KillRegs[0] == Dst);
+ // Last def is now dead.
+ TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
+ }
+ VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
- VRM.RemoveMachineInstrFromMaps(&MI);
Spills.disallowClobberPhysReg(VirtReg);
goto ProcessNextInst;
}
}
}
+ assert(PhysReg && "VR not assigned a physical register?");
RegInfo->setPhysRegUsed(PhysReg);
unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
ReusedOperands.markClobbered(RReg);
if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
++NumDCE;
DOUT << "Removing now-noop copy: " << MI;
+ InvalidateKills(MI, RegKills, KillOps);
+ VRM.RemoveMachineInstrFromMaps(&MI);
MBB.erase(&MI);
Erased = true;
- VRM.RemoveMachineInstrFromMaps(&MI);
UpdateKills(*LastStore, RegKills, KillOps);
goto ProcessNextInst;
}
}
}
ProcessNextInst:
+ DistanceMap.insert(std::make_pair(&MI, Dist++));
if (!Erased && !BackTracked) {
for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
UpdateKills(*II, RegKills, KillOps);