diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp
index 2726fc33753..934cc124c77 100644
--- a/lib/CodeGen/LiveIntervalAnalysis.cpp
+++ b/lib/CodeGen/LiveIntervalAnalysis.cpp
@@ -15,30 +15,22 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "liveintervals"
+#define DEBUG_TYPE "regalloc"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "VirtRegMap.h"
 #include "llvm/Value.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/CodeGen/LiveVariables.h"
-#include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstrBuilder.h"
-#include "llvm/CodeGen/MachineLoopInfo.h"
-#include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
-#include "llvm/CodeGen/ProcessImplicitDefs.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/ADT/STLExtras.h"
 #include <algorithm>
@@ -51,12 +43,16 @@
 static cl::opt<bool> DisableReMat("disable-rematerialization",
                                   cl::init(false), cl::Hidden);
 
 STATISTIC(numIntervals , "Number of original intervals");
-STATISTIC(numFolds     , "Number of loads/stores folded into instructions");
-STATISTIC(numSplits    , "Number of intervals split");
 
 char LiveIntervals::ID = 0;
-INITIALIZE_PASS(LiveIntervals, "liveintervals",
-                "Live Interval Analysis", false, false);
+INITIALIZE_PASS_BEGIN(LiveIntervals, "liveintervals",
+                "Live Interval Analysis", false, false)
+INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
+INITIALIZE_PASS_DEPENDENCY(LiveVariables)
+INITIALIZE_PASS_DEPENDENCY(MachineDominatorTree)
+INITIALIZE_PASS_DEPENDENCY(SlotIndexes)
+INITIALIZE_PASS_END(LiveIntervals, "liveintervals",
+                "Live Interval Analysis", false, false)
 
 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.setPreservesCFG();
@@ -64,18 +60,8 @@ void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addPreserved<AliasAnalysis>();
   AU.addRequired<LiveVariables>();
   AU.addPreserved<LiveVariables>();
-  AU.addRequired<MachineLoopInfo>();
-  AU.addPreserved<MachineLoopInfo>();
+  AU.addPreservedID(MachineLoopInfoID);
   AU.addPreservedID(MachineDominatorsID);
-
-  if (!StrongPHIElim) {
-    AU.addPreservedID(PHIEliminationID);
-    AU.addRequiredID(PHIEliminationID);
-  }
-
-  AU.addRequiredID(TwoAddressInstructionPassID);
-  AU.addPreserved<ProcessImplicitDefs>();
-  AU.addRequired<ProcessImplicitDefs>();
   AU.addPreserved<SlotIndexes>();
   AU.addRequiredTransitive<SlotIndexes>();
   MachineFunctionPass::getAnalysisUsage(AU);
 }
@@ -88,14 +74,12 @@ void LiveIntervals::releaseMemory() {
     delete I->second;
 
   r2iMap_.clear();
+  RegMaskSlots.clear();
+  RegMaskBits.clear();
+  RegMaskBlocks.clear();
 
   // Release VNInfo memory regions, VNInfo objects don't need to be dtor'd.
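   // They come from a BumpPtrAllocator, so resetting it releases all of
   // their memory in one shot, with no per-object destructor calls.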
VNInfoAllocator.Reset(); - while (!CloneMIs.empty()) { - MachineInstr *MI = CloneMIs.back(); - CloneMIs.pop_back(); - mf_->DeleteMachineInstr(MI); - } } /// runOnMachineFunction - Register allocate the whole function @@ -110,6 +94,7 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { lv_ = &getAnalysis(); indexes_ = &getAnalysis(); allocatableRegs_ = tri_->getAllocatableSet(fn); + reservedRegs_ = tri_->getReservedRegs(fn); computeIntervals(); @@ -122,141 +107,34 @@ bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) { /// print - Implement the dump method. void LiveIntervals::print(raw_ostream &OS, const Module* ) const { OS << "********** INTERVALS **********\n"; - for (const_iterator I = begin(), E = end(); I != E; ++I) { - I->second->print(OS, tri_); - OS << "\n"; - } + + // Dump the physregs. + for (unsigned Reg = 1, RegE = tri_->getNumRegs(); Reg != RegE; ++Reg) + if (const LiveInterval *LI = r2iMap_.lookup(Reg)) { + LI->print(OS, tri_); + OS << '\n'; + } + + // Dump the virtregs. + for (unsigned Reg = 0, RegE = mri_->getNumVirtRegs(); Reg != RegE; ++Reg) + if (const LiveInterval *LI = + r2iMap_.lookup(TargetRegisterInfo::index2VirtReg(Reg))) { + LI->print(OS, tri_); + OS << '\n'; + } printInstrs(OS); } void LiveIntervals::printInstrs(raw_ostream &OS) const { OS << "********** MACHINEINSTRS **********\n"; - - for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end(); - mbbi != mbbe; ++mbbi) { - OS << "BB#" << mbbi->getNumber() - << ":\t\t# derived from " << mbbi->getName() << "\n"; - for (MachineBasicBlock::iterator mii = mbbi->begin(), - mie = mbbi->end(); mii != mie; ++mii) { - if (mii->isDebugValue()) - OS << " \t" << *mii; - else - OS << getInstructionIndex(mii) << '\t' << *mii; - } - } + mf_->print(OS, indexes_); } void LiveIntervals::dumpInstrs() const { printInstrs(dbgs()); } -bool LiveIntervals::conflictsWithPhysReg(const LiveInterval &li, - VirtRegMap &vrm, unsigned reg) { - // We don't handle fancy stuff crossing basic block boundaries - if (li.ranges.size() != 1) - return true; - const LiveRange &range = li.ranges.front(); - SlotIndex idx = range.start.getBaseIndex(); - SlotIndex end = range.end.getPrevSlot().getBaseIndex().getNextIndex(); - - // Skip deleted instructions - MachineInstr *firstMI = getInstructionFromIndex(idx); - while (!firstMI && idx != end) { - idx = idx.getNextIndex(); - firstMI = getInstructionFromIndex(idx); - } - if (!firstMI) - return false; - - // Find last instruction in range - SlotIndex lastIdx = end.getPrevIndex(); - MachineInstr *lastMI = getInstructionFromIndex(lastIdx); - while (!lastMI && lastIdx != idx) { - lastIdx = lastIdx.getPrevIndex(); - lastMI = getInstructionFromIndex(lastIdx); - } - if (!lastMI) - return false; - - // Range cannot cross basic block boundaries or terminators - MachineBasicBlock *MBB = firstMI->getParent(); - if (MBB != lastMI->getParent() || lastMI->getDesc().isTerminator()) - return true; - - MachineBasicBlock::const_iterator E = lastMI; - ++E; - for (MachineBasicBlock::const_iterator I = firstMI; I != E; ++I) { - const MachineInstr &MI = *I; - - // Allow copies to and from li.reg - if (MI.isCopy()) - if (MI.getOperand(0).getReg() == li.reg || - MI.getOperand(1).getReg() == li.reg) - continue; - - // Check for operands using reg - for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) { - const MachineOperand& mop = MI.getOperand(i); - if (!mop.isReg()) - continue; - unsigned PhysReg = mop.getReg(); - if (PhysReg == 0 || PhysReg == li.reg) - continue; - if 
(TargetRegisterInfo::isVirtualRegister(PhysReg)) { - if (!vrm.hasPhys(PhysReg)) - continue; - PhysReg = vrm.getPhys(PhysReg); - } - if (PhysReg && tri_->regsOverlap(PhysReg, reg)) - return true; - } - } - - // No conflicts found. - return false; -} - -bool LiveIntervals::conflictsWithAliasRef(LiveInterval &li, unsigned Reg, - SmallPtrSet &JoinedCopies) { - for (LiveInterval::Ranges::const_iterator - I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) { - for (SlotIndex index = I->start.getBaseIndex(), - end = I->end.getPrevSlot().getBaseIndex().getNextIndex(); - index != end; - index = index.getNextIndex()) { - MachineInstr *MI = getInstructionFromIndex(index); - if (!MI) - continue; // skip deleted instructions - - if (JoinedCopies.count(MI)) - continue; - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - MachineOperand& MO = MI->getOperand(i); - if (!MO.isReg()) - continue; - unsigned PhysReg = MO.getReg(); - if (PhysReg == 0 || PhysReg == Reg || - TargetRegisterInfo::isVirtualRegister(PhysReg)) - continue; - if (tri_->regsOverlap(Reg, PhysReg)) - return true; - } - } - } - - return false; -} - -#ifndef NDEBUG -static void printRegName(unsigned reg, const TargetRegisterInfo* tri_) { - if (TargetRegisterInfo::isPhysicalRegister(reg)) - dbgs() << tri_->getName(reg); - else - dbgs() << "%reg" << reg; -} -#endif - static bool MultipleDefsBySameMI(const MachineInstr &MI, unsigned MOIdx) { unsigned Reg = MI.getOperand(MOIdx).getReg(); @@ -282,11 +160,11 @@ bool LiveIntervals::isPartialRedef(SlotIndex MIIdx, MachineOperand &MO, if (!MO.getSubReg() || MO.isEarlyClobber()) return false; - SlotIndex RedefIndex = MIIdx.getDefIndex(); + SlotIndex RedefIndex = MIIdx.getRegSlot(); const LiveRange *OldLR = - interval.getLiveRangeContaining(RedefIndex.getUseIndex()); - if (OldLR->valno->isDefAccurate()) { - MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def); + interval.getLiveRangeContaining(RedefIndex.getRegSlot(true)); + MachineInstr *DefMI = getInstructionFromIndex(OldLR->valno->def); + if (DefMI != 0) { return DefMI->findRegisterDefOperandIdx(interval.reg) != -1; } return false; @@ -298,10 +176,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, MachineOperand& MO, unsigned MOIdx, LiveInterval &interval) { - DEBUG({ - dbgs() << "\t\tregister: "; - printRegName(interval.reg, tri_); - }); + DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_)); // Virtual registers may be defined multiple times (due to phi // elimination and 2-addr elimination). Much of what we do only has to be @@ -310,24 +185,13 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg); if (interval.empty()) { // Get the Idx of the defining instructions. - SlotIndex defIndex = MIIdx.getDefIndex(); - // Earlyclobbers move back one, so that they overlap the live range - // of inputs. - if (MO.isEarlyClobber()) - defIndex = MIIdx.getUseIndex(); - - // Make sure the first definition is not a partial redefinition. Add an - // of the full register. - if (MO.getSubReg()) - mi->addRegisterDefined(interval.reg); - - MachineInstr *CopyMI = NULL; - if (mi->isCopyLike()) { - CopyMI = mi; - } + SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber()); - VNInfo *ValNo = interval.getNextValue(defIndex, CopyMI, true, - VNInfoAllocator); + // Make sure the first definition is not a partial redefinition. 
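+    // A partial redefinition reads the register as well as writing it, so
+    // the readsReg() check below is sufficient to catch one.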
+ assert(!MO.readsReg() && "First def cannot also read virtual register " + "missing flag?"); + + VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator); assert(ValNo->id == 0 && "First value in interval is not 0?"); // Loop over all of the blocks that the vreg is defined in. There are @@ -338,9 +202,9 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // FIXME: what about dead vars? SlotIndex killIdx; if (vi.Kills[0] != mi) - killIdx = getInstructionIndex(vi.Kills[0]).getDefIndex(); + killIdx = getInstructionIndex(vi.Kills[0]).getRegSlot(); else - killIdx = defIndex.getStoreIndex(); + killIdx = defIndex.getDeadSlot(); // If the kill happens after the definition, we have an intra-block // live range. @@ -388,13 +252,14 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) { MachineInstr *Kill = vi.Kills[i]; SlotIndex Start = getMBBStartIdx(Kill->getParent()); - SlotIndex killIdx = getInstructionIndex(Kill).getDefIndex(); + SlotIndex killIdx = getInstructionIndex(Kill).getRegSlot(); // Create interval with one of a NEW value number. Note that this value // number isn't actually defined by an instruction, weird huh? :) if (PHIJoin) { - ValNo = interval.getNextValue(SlotIndex(Start, true), 0, false, - VNInfoAllocator); + assert(getInstructionFromIndex(Start) == 0 && + "PHI def index points at actual instruction."); + ValNo = interval.getNextValue(Start, VNInfoAllocator); ValNo->setIsPHIDef(true); } LiveRange LR(Start, killIdx, ValNo); @@ -425,14 +290,12 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // are actually two values in the live interval. Because of this we // need to take the LiveRegion that defines this register and split it // into two values. - SlotIndex RedefIndex = MIIdx.getDefIndex(); - if (MO.isEarlyClobber()) - RedefIndex = MIIdx.getUseIndex(); + SlotIndex RedefIndex = MIIdx.getRegSlot(MO.isEarlyClobber()); const LiveRange *OldLR = - interval.getLiveRangeContaining(RedefIndex.getUseIndex()); + interval.getLiveRangeContaining(RedefIndex.getRegSlot(true)); VNInfo *OldValNo = OldLR->valno; - SlotIndex DefIndex = OldValNo->def.getDefIndex(); + SlotIndex DefIndex = OldValNo->def.getRegSlot(); // Delete the previous value, which should be short and continuous, // because the 2-addr copy must be in the same MBB as the redef. @@ -440,18 +303,10 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // The new value number (#1) is defined by the instruction we claimed // defined value #0. - VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->getCopy(), - false, // update at * - VNInfoAllocator); - ValNo->setFlags(OldValNo->getFlags()); // * <- updating here + VNInfo *ValNo = interval.createValueCopy(OldValNo, VNInfoAllocator); // Value#0 is now defined by the 2-addr instruction. - OldValNo->def = RedefIndex; - OldValNo->setCopy(0); - - // A re-def may be a copy. e.g. %reg1030:6 = VMOVD %reg1026, ... - if (PartReDef && mi->isCopyLike()) - OldValNo->setCopy(&*mi); + OldValNo->def = RedefIndex; // Add the new live interval which replaces the range for the input copy. LiveRange LR(DefIndex, RedefIndex, ValNo); @@ -461,7 +316,7 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // If this redefinition is dead, we need to add a dummy unit live // range covering the def slot. 
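       // For example, a dead redef at index 16 gets the unit range [16r,16d)
       // so the dead def slot still shows up in the interval.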
if (MO.isDead()) - interval.addRange(LiveRange(RedefIndex, RedefIndex.getStoreIndex(), + interval.addRange(LiveRange(RedefIndex, RedefIndex.getDeadSlot(), OldValNo)); DEBUG({ @@ -473,15 +328,11 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, // live until the end of the block. We've already taken care of the // rest of the live range. - SlotIndex defIndex = MIIdx.getDefIndex(); + SlotIndex defIndex = MIIdx.getRegSlot(); if (MO.isEarlyClobber()) - defIndex = MIIdx.getUseIndex(); + defIndex = MIIdx.getRegSlot(true); - VNInfo *ValNo; - MachineInstr *CopyMI = NULL; - if (mi->isCopyLike()) - CopyMI = mi; - ValNo = interval.getNextValue(defIndex, CopyMI, true, VNInfoAllocator); + VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator); SlotIndex killIndex = getMBBEndIdx(mbb); LiveRange LR(defIndex, killIndex, ValNo); @@ -496,24 +347,26 @@ void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb, DEBUG(dbgs() << '\n'); } +static bool isRegLiveIntoSuccessor(const MachineBasicBlock *MBB, unsigned Reg) { + for (MachineBasicBlock::const_succ_iterator SI = MBB->succ_begin(), + SE = MBB->succ_end(); + SI != SE; ++SI) { + const MachineBasicBlock* succ = *SI; + if (succ->isLiveIn(Reg)) + return true; + } + return false; +} + void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB, MachineBasicBlock::iterator mi, SlotIndex MIIdx, MachineOperand& MO, - LiveInterval &interval, - MachineInstr *CopyMI) { - // A physical register cannot be live across basic block, so its - // lifetime must end somewhere in its defining basic block. - DEBUG({ - dbgs() << "\t\tregister: "; - printRegName(interval.reg, tri_); - }); + LiveInterval &interval) { + DEBUG(dbgs() << "\t\tregister: " << PrintReg(interval.reg, tri_)); SlotIndex baseIndex = MIIdx; - SlotIndex start = baseIndex.getDefIndex(); - // Earlyclobbers move back one. - if (MO.isEarlyClobber()) - start = MIIdx.getUseIndex(); + SlotIndex start = baseIndex.getRegSlot(MO.isEarlyClobber()); SlotIndex end = start; // If it is not used after definition, it is considered dead at @@ -523,7 +376,7 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB, // advance below compensates. if (MO.isDead()) { DEBUG(dbgs() << " dead"); - end = start.getStoreIndex(); + end = start.getDeadSlot(); goto exit; } @@ -540,21 +393,21 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB, if (mi->killsRegister(interval.reg, tri_)) { DEBUG(dbgs() << " killed"); - end = baseIndex.getDefIndex(); + end = baseIndex.getRegSlot(); goto exit; } else { int DefIdx = mi->findRegisterDefOperandIdx(interval.reg,false,false,tri_); if (DefIdx != -1) { if (mi->isRegTiedToUseOperand(DefIdx)) { // Two-address instruction. - end = baseIndex.getDefIndex(); + end = baseIndex.getRegSlot(mi->getOperand(DefIdx).isEarlyClobber()); } else { // Another instruction redefines the register before it is ever read. // Then the register is essentially dead at the instruction that // defines it. Hence its interval is: // [defSlot(def), defSlot(def)+1) DEBUG(dbgs() << " dead"); - end = start.getStoreIndex(); + end = start.getDeadSlot(); } goto exit; } @@ -563,22 +416,27 @@ void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB, baseIndex = baseIndex.getNextIndex(); } - // The only case we should have a dead physreg here without a killing or - // instruction where we know it's dead is if it is live-in to the function - // and never used. 
Another possible case is the implicit use of the - // physical register has been deleted by two-address pass. - end = start.getStoreIndex(); + // If we get here the register *should* be live out. + assert(!isAllocatable(interval.reg) && "Physregs shouldn't be live out!"); + // FIXME: We need saner rules for reserved regs. + if (isReserved(interval.reg)) { + end = start.getDeadSlot(); + } else { + // Unreserved, unallocable registers like EFLAGS can be live across basic + // block boundaries. + assert(isRegLiveIntoSuccessor(MBB, interval.reg) && + "Unreserved reg not live-out?"); + end = getMBBEndIdx(MBB); + } exit: assert(start < end && "did not find end of interval?"); // Already exists? Extend old live interval. - LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start); - bool Extend = OldLR != interval.end(); - VNInfo *ValNo = Extend - ? OldLR->valno : interval.getNextValue(start, CopyMI, true, VNInfoAllocator); - if (MO.isEarlyClobber() && Extend) - ValNo->setHasRedefByEC(true); + VNInfo *ValNo = interval.getVNInfoAt(start); + bool Extend = ValNo != 0; + if (!Extend) + ValNo = interval.getNextValue(start, VNInfoAllocator); LiveRange LR(start, end, ValNo); interval.addRange(LR); DEBUG(dbgs() << " +" << LR << '\n'); @@ -592,29 +450,21 @@ void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB, if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) handleVirtualRegisterDef(MBB, MI, MIIdx, MO, MOIdx, getOrCreateInterval(MO.getReg())); - else if (allocatableRegs_[MO.getReg()]) { - MachineInstr *CopyMI = NULL; - if (MI->isCopyLike()) - CopyMI = MI; + else handlePhysicalRegisterDef(MBB, MI, MIIdx, MO, - getOrCreateInterval(MO.getReg()), CopyMI); - // Def of a register also defines its sub-registers. - for (const unsigned* AS = tri_->getSubRegisters(MO.getReg()); *AS; ++AS) - // If MI also modifies the sub-register explicitly, avoid processing it - // more than once. Do not pass in TRI here so it checks for exact match. - if (!MI->definesRegister(*AS)) - handlePhysicalRegisterDef(MBB, MI, MIIdx, MO, - getOrCreateInterval(*AS), 0); - } + getOrCreateInterval(MO.getReg())); } void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, SlotIndex MIIdx, - LiveInterval &interval, bool isAlias) { - DEBUG({ - dbgs() << "\t\tlivein register: "; - printRegName(interval.reg, tri_); - }); + LiveInterval &interval) { + assert(TargetRegisterInfo::isPhysicalRegister(interval.reg) && + "Only physical registers can be live in."); + assert((!isAllocatable(interval.reg) || MBB->getParent()->begin() || + MBB->isLandingPad()) && + "Allocatable live-ins only valid for entry blocks and landing pads."); + + DEBUG(dbgs() << "\t\tlivein register: " << PrintReg(interval.reg, tri_)); // Look for kills, if it reaches a def before it's killed, then it shouldn't // be considered a livein. @@ -640,16 +490,16 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, while (mi != E) { if (mi->killsRegister(interval.reg, tri_)) { DEBUG(dbgs() << " killed"); - end = baseIndex.getDefIndex(); + end = baseIndex.getRegSlot(); SeenDefUse = true; break; - } else if (mi->definesRegister(interval.reg, tri_)) { + } else if (mi->modifiesRegister(interval.reg, tri_)) { // Another instruction redefines the register before it is ever read. // Then the register is essentially dead at the instruction that defines // it. 
Hence its interval is: // [defSlot(def), defSlot(def)+1) DEBUG(dbgs() << " dead"); - end = start.getStoreIndex(); + end = start.getDeadSlot(); SeenDefUse = true; break; } @@ -663,18 +513,25 @@ void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB, // Live-in register might not be used at all. if (!SeenDefUse) { - if (isAlias) { + if (isAllocatable(interval.reg) || + !isRegLiveIntoSuccessor(MBB, interval.reg)) { + // Allocatable registers are never live through. + // Non-allocatable registers that aren't live into any successors also + // aren't live through. DEBUG(dbgs() << " dead"); - end = MIIdx.getStoreIndex(); + return; } else { + // If we get here the register is non-allocatable and live into some + // successor. We'll conservatively assume it's live-through. DEBUG(dbgs() << " live through"); - end = baseIndex; + end = getMBBEndIdx(MBB); } } - VNInfo *vni = - interval.getNextValue(SlotIndex(getMBBStartIdx(MBB), true), - 0, false, VNInfoAllocator); + SlotIndex defIdx = getMBBStartIdx(MBB); + assert(getInstructionFromIndex(defIdx) == 0 && + "PHI def index points at actual instruction."); + VNInfo *vni = interval.getNextValue(defIdx, VNInfoAllocator); vni->setIsPHIDef(true); LiveRange LR(start, end, vni); @@ -691,10 +548,14 @@ void LiveIntervals::computeIntervals() { << "********** Function: " << ((Value*)mf_->getFunction())->getName() << '\n'); + RegMaskBlocks.resize(mf_->getNumBlockIDs()); + SmallVector UndefUses; for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end(); MBBI != E; ++MBBI) { MachineBasicBlock *MBB = MBBI; + RegMaskBlocks[MBB->getNumber()].first = RegMaskSlots.size(); + if (MBB->empty()) continue; @@ -707,11 +568,6 @@ void LiveIntervals::computeIntervals() { for (MachineBasicBlock::livein_iterator LI = MBB->livein_begin(), LE = MBB->livein_end(); LI != LE; ++LI) { handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI)); - // Multiple live-ins can alias the same register. - for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS) - if (!hasInterval(*AS)) - handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS), - true); } // Skip over empty initial indices. @@ -723,10 +579,20 @@ void LiveIntervals::computeIntervals() { DEBUG(dbgs() << MIIndex << "\t" << *MI); if (MI->isDebugValue()) continue; + assert(indexes_->getInstructionFromIndex(MIIndex) == MI && + "Lost SlotIndex synchronization"); // Handle defs. for (int i = MI->getNumOperands() - 1; i >= 0; --i) { MachineOperand &MO = MI->getOperand(i); + + // Collect register masks. + if (MO.isRegMask()) { + RegMaskSlots.push_back(MIIndex.getRegSlot()); + RegMaskBits.push_back(MO.getRegMask()); + continue; + } + if (!MO.isReg() || !MO.getReg()) continue; @@ -740,6 +606,10 @@ void LiveIntervals::computeIntervals() { // Move to the next instr slot. MIIndex = indexes_->getNextNonNullIndex(MIIndex); } + + // Compute the number of register mask instructions in this block. + std::pair &RMB = RegMaskBlocks[MBB->getNumber()]; + RMB.second = RegMaskSlots.size() - RMB.first;; } // Create empty intervals for registers defined by implicit_def's (except @@ -764,10 +634,169 @@ LiveInterval* LiveIntervals::dupInterval(LiveInterval *li) { return NewLI; } +/// shrinkToUses - After removing some uses of a register, shrink its live +/// range to just the remaining uses. This method does not compute reaching +/// defs for new uses, and it doesn't remove dead defs. 
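+/// Returns true if the interval may have been separated into multiple
+/// connected components, which can happen when a dead PHI-def is marked
+/// unused and its range removed.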
+bool LiveIntervals::shrinkToUses(LiveInterval *li, + SmallVectorImpl *dead) { + DEBUG(dbgs() << "Shrink: " << *li << '\n'); + assert(TargetRegisterInfo::isVirtualRegister(li->reg) + && "Can only shrink virtual registers"); + // Find all the values used, including PHI kills. + SmallVector, 16> WorkList; + + // Blocks that have already been added to WorkList as live-out. + SmallPtrSet LiveOut; + + // Visit all instructions reading li->reg. + for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li->reg); + MachineInstr *UseMI = I.skipInstruction();) { + if (UseMI->isDebugValue() || !UseMI->readsVirtualRegister(li->reg)) + continue; + SlotIndex Idx = getInstructionIndex(UseMI).getRegSlot(); + // Note: This intentionally picks up the wrong VNI in case of an EC redef. + // See below. + VNInfo *VNI = li->getVNInfoBefore(Idx); + if (!VNI) { + // This shouldn't happen: readsVirtualRegister returns true, but there is + // no live value. It is likely caused by a target getting flags + // wrong. + DEBUG(dbgs() << Idx << '\t' << *UseMI + << "Warning: Instr claims to read non-existent value in " + << *li << '\n'); + continue; + } + // Special case: An early-clobber tied operand reads and writes the + // register one slot early. The getVNInfoBefore call above would have + // picked up the value defined by UseMI. Adjust the kill slot and value. + if (SlotIndex::isSameInstr(VNI->def, Idx)) { + Idx = VNI->def; + VNI = li->getVNInfoBefore(Idx); + assert(VNI && "Early-clobber tied value not available"); + } + WorkList.push_back(std::make_pair(Idx, VNI)); + } + + // Create a new live interval with only minimal live segments per def. + LiveInterval NewLI(li->reg, 0); + for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end(); + I != E; ++I) { + VNInfo *VNI = *I; + if (VNI->isUnused()) + continue; + NewLI.addRange(LiveRange(VNI->def, VNI->def.getDeadSlot(), VNI)); + } + + // Keep track of the PHIs that are in use. + SmallPtrSet UsedPHIs; + + // Extend intervals to reach all uses in WorkList. + while (!WorkList.empty()) { + SlotIndex Idx = WorkList.back().first; + VNInfo *VNI = WorkList.back().second; + WorkList.pop_back(); + const MachineBasicBlock *MBB = getMBBFromIndex(Idx.getPrevSlot()); + SlotIndex BlockStart = getMBBStartIdx(MBB); + + // Extend the live range for VNI to be live at Idx. + if (VNInfo *ExtVNI = NewLI.extendInBlock(BlockStart, Idx)) { + (void)ExtVNI; + assert(ExtVNI == VNI && "Unexpected existing value number"); + // Is this a PHIDef we haven't seen before? + if (!VNI->isPHIDef() || VNI->def != BlockStart || !UsedPHIs.insert(VNI)) + continue; + // The PHI is live, make sure the predecessors are live-out. + for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), + PE = MBB->pred_end(); PI != PE; ++PI) { + if (!LiveOut.insert(*PI)) + continue; + SlotIndex Stop = getMBBEndIdx(*PI); + // A predecessor is not required to have a live-out value for a PHI. + if (VNInfo *PVNI = li->getVNInfoBefore(Stop)) + WorkList.push_back(std::make_pair(Stop, PVNI)); + } + continue; + } + + // VNI is live-in to MBB. + DEBUG(dbgs() << " live-in at " << BlockStart << '\n'); + NewLI.addRange(LiveRange(BlockStart, Idx, VNI)); + + // Make sure VNI is live-out from the predecessors. 
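+      // LiveOut ensures each predecessor block enters the worklist at most
+      // once, no matter how many paths reach it.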
+ for (MachineBasicBlock::const_pred_iterator PI = MBB->pred_begin(), + PE = MBB->pred_end(); PI != PE; ++PI) { + if (!LiveOut.insert(*PI)) + continue; + SlotIndex Stop = getMBBEndIdx(*PI); + assert(li->getVNInfoBefore(Stop) == VNI && + "Wrong value out of predecessor"); + WorkList.push_back(std::make_pair(Stop, VNI)); + } + } + + // Handle dead values. + bool CanSeparate = false; + for (LiveInterval::vni_iterator I = li->vni_begin(), E = li->vni_end(); + I != E; ++I) { + VNInfo *VNI = *I; + if (VNI->isUnused()) + continue; + LiveInterval::iterator LII = NewLI.FindLiveRangeContaining(VNI->def); + assert(LII != NewLI.end() && "Missing live range for PHI"); + if (LII->end != VNI->def.getDeadSlot()) + continue; + if (VNI->isPHIDef()) { + // This is a dead PHI. Remove it. + VNI->setIsUnused(true); + NewLI.removeRange(*LII); + DEBUG(dbgs() << "Dead PHI at " << VNI->def << " may separate interval\n"); + CanSeparate = true; + } else { + // This is a dead def. Make sure the instruction knows. + MachineInstr *MI = getInstructionFromIndex(VNI->def); + assert(MI && "No instruction defining live value"); + MI->addRegisterDead(li->reg, tri_); + if (dead && MI->allDefsAreDead()) { + DEBUG(dbgs() << "All defs dead: " << VNI->def << '\t' << *MI); + dead->push_back(MI); + } + } + } + + // Move the trimmed ranges back. + li->ranges.swap(NewLI.ranges); + DEBUG(dbgs() << "Shrunk: " << *li << '\n'); + return CanSeparate; +} + + //===----------------------------------------------------------------------===// // Register allocator hooks. // +void LiveIntervals::addKillFlags() { + for (iterator I = begin(), E = end(); I != E; ++I) { + unsigned Reg = I->first; + if (TargetRegisterInfo::isPhysicalRegister(Reg)) + continue; + if (mri_->reg_nodbg_empty(Reg)) + continue; + LiveInterval *LI = I->second; + + // Every instruction that kills Reg corresponds to a live range end point. + for (LiveInterval::iterator RI = LI->begin(), RE = LI->end(); RI != RE; + ++RI) { + // A block index indicates an MBB edge. + if (RI->end.isBlock()) + continue; + MachineInstr *MI = getInstructionFromIndex(RI->end); + if (!MI) + continue; + MI->addRegisterKilled(Reg, NULL); + } + } +} + /// getReMatImplicitUse - If the remat definition MI has one (for now, we only /// allow one) virtual register operand, then its uses are implicitly using /// the register. Returns the virtual register. @@ -782,16 +811,10 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li, if (Reg == 0 || Reg == li.reg) continue; - if (TargetRegisterInfo::isPhysicalRegister(Reg) && - !allocatableRegs_[Reg]) + if (TargetRegisterInfo::isPhysicalRegister(Reg) && !isAllocatable(Reg)) continue; - // FIXME: For now, only remat MI with at most one register operand. - assert(!RegOp && - "Can't rematerialize instruction with multiple register operand!"); RegOp = MO.getReg(); -#ifndef NDEBUG - break; -#endif + break; // Found vreg operand - leave the loop. } return RegOp; } @@ -800,18 +823,17 @@ unsigned LiveIntervals::getReMatImplicitUse(const LiveInterval &li, /// which reaches the given instruction also reaches the specified use index. 
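+/// In other words, the same value number must be live at both MI's own
+/// index and at UseIdx.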
bool LiveIntervals::isValNoAvailableAt(const LiveInterval &li, MachineInstr *MI, SlotIndex UseIdx) const { - SlotIndex Index = getInstructionIndex(MI); - VNInfo *ValNo = li.FindLiveRangeContaining(Index)->valno; - LiveInterval::const_iterator UI = li.FindLiveRangeContaining(UseIdx); - return UI != li.end() && UI->valno == ValNo; + VNInfo *UValNo = li.getVNInfoAt(UseIdx); + return UValNo && UValNo == li.getVNInfoAt(getInstructionIndex(MI)); } /// isReMaterializable - Returns true if the definition MI of the specified /// val# of the specified interval is re-materializable. -bool LiveIntervals::isReMaterializable(const LiveInterval &li, - const VNInfo *ValNo, MachineInstr *MI, - SmallVectorImpl &SpillIs, - bool &isLoad) { +bool +LiveIntervals::isReMaterializable(const LiveInterval &li, + const VNInfo *ValNo, MachineInstr *MI, + const SmallVectorImpl *SpillIs, + bool &isLoad) { if (DisableReMat) return false; @@ -829,7 +851,7 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li, ri != re; ++ri) { MachineInstr *UseMI = &*ri; SlotIndex UseIdx = getInstructionIndex(UseMI); - if (li.FindLiveRangeContaining(UseIdx)->valno != ValNo) + if (li.getVNInfoAt(UseIdx) != ValNo) continue; if (!isValNoAvailableAt(ImpLi, MI, UseIdx)) return false; @@ -837,27 +859,20 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li, // If a register operand of the re-materialized instruction is going to // be spilled next, then it's not legal to re-materialize this instruction. - for (unsigned i = 0, e = SpillIs.size(); i != e; ++i) - if (ImpUse == SpillIs[i]->reg) - return false; + if (SpillIs) + for (unsigned i = 0, e = SpillIs->size(); i != e; ++i) + if (ImpUse == (*SpillIs)[i]->reg) + return false; } return true; } -/// isReMaterializable - Returns true if the definition MI of the specified -/// val# of the specified interval is re-materializable. -bool LiveIntervals::isReMaterializable(const LiveInterval &li, - const VNInfo *ValNo, MachineInstr *MI) { - SmallVector Dummy1; - bool Dummy2; - return isReMaterializable(li, ValNo, MI, Dummy1, Dummy2); -} - /// isReMaterializable - Returns true if every definition of MI of every /// val# of the specified interval is re-materializable. -bool LiveIntervals::isReMaterializable(const LiveInterval &li, - SmallVectorImpl &SpillIs, - bool &isLoad) { +bool +LiveIntervals::isReMaterializable(const LiveInterval &li, + const SmallVectorImpl *SpillIs, + bool &isLoad) { isLoad = false; for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end(); i != e; ++i) { @@ -865,9 +880,9 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li, if (VNI->isUnused()) continue; // Dead val#. // Is the def for the val# rematerializable? - if (!VNI->isDefAccurate()) - return false; MachineInstr *ReMatDefMI = getInstructionFromIndex(VNI->def); + if (!ReMatDefMI) + return false; bool DefIsLoad = false; if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, SpillIs, DefIsLoad)) @@ -877,1132 +892,653 @@ bool LiveIntervals::isReMaterializable(const LiveInterval &li, return true; } -/// FilterFoldedOps - Filter out two-address use operands. Return -/// true if it finds any issue with the operands that ought to prevent -/// folding. -static bool FilterFoldedOps(MachineInstr *MI, - SmallVector &Ops, - unsigned &MRInfo, - SmallVector &FoldOps) { - MRInfo = 0; - for (unsigned i = 0, e = Ops.size(); i != e; ++i) { - unsigned OpIdx = Ops[i]; - MachineOperand &MO = MI->getOperand(OpIdx); - // FIXME: fold subreg use. 
- if (MO.getSubReg()) - return true; - if (MO.isDef()) - MRInfo |= (unsigned)VirtRegMap::isMod; - else { - // Filter out two-address use operand(s). - if (MI->isRegTiedToDefOperand(OpIdx)) { - MRInfo = VirtRegMap::isModRef; - continue; - } - MRInfo |= (unsigned)VirtRegMap::isRef; - } - FoldOps.push_back(OpIdx); - } - return false; +MachineBasicBlock* +LiveIntervals::intervalIsInOneMBB(const LiveInterval &LI) const { + // A local live range must be fully contained inside the block, meaning it is + // defined and killed at instructions, not at block boundaries. It is not + // live in or or out of any block. + // + // It is technically possible to have a PHI-defined live range identical to a + // single block, but we are going to return false in that case. + + SlotIndex Start = LI.beginIndex(); + if (Start.isBlock()) + return NULL; + + SlotIndex Stop = LI.endIndex(); + if (Stop.isBlock()) + return NULL; + + // getMBBFromIndex doesn't need to search the MBB table when both indexes + // belong to proper instructions. + MachineBasicBlock *MBB1 = indexes_->getMBBFromIndex(Start); + MachineBasicBlock *MBB2 = indexes_->getMBBFromIndex(Stop); + return MBB1 == MBB2 ? MBB1 : NULL; } +float +LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) { + // Limit the loop depth ridiculousness. + if (loopDepth > 200) + loopDepth = 200; -/// tryFoldMemoryOperand - Attempts to fold either a spill / restore from -/// slot / to reg or any rematerialized load into ith operand of specified -/// MI. If it is successul, MI is updated with the newly created MI and -/// returns true. -bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI, - VirtRegMap &vrm, MachineInstr *DefMI, - SlotIndex InstrIdx, - SmallVector &Ops, - bool isSS, int Slot, unsigned Reg) { - // If it is an implicit def instruction, just delete it. - if (MI->isImplicitDef()) { - RemoveMachineInstrFromMaps(MI); - vrm.RemoveMachineInstrFromMaps(MI); - MI->eraseFromParent(); - ++numFolds; - return true; - } + // The loop depth is used to roughly estimate the number of times the + // instruction is executed. Something like 10^d is simple, but will quickly + // overflow a float. This expression behaves like 10^d for small d, but is + // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of + // headroom before overflow. + // By the way, powf() might be unavailable here. For consistency, + // We may take pow(double,double). + float lc = std::pow(1 + (100.0 / (loopDepth + 10)), (double)loopDepth); - // Filter the list of operand indexes that are to be folded. Abort if - // any operand will prevent folding. - unsigned MRInfo = 0; - SmallVector FoldOps; - if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps)) - return false; + return (isDef + isUse) * lc; +} - // The only time it's safe to fold into a two address instruction is when - // it's folding reload and spill from / into a spill stack slot. - if (DefMI && (MRInfo & VirtRegMap::isMod)) - return false; +LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg, + MachineInstr* startInst) { + LiveInterval& Interval = getOrCreateInterval(reg); + VNInfo* VN = Interval.getNextValue( + SlotIndex(getInstructionIndex(startInst).getRegSlot()), + getVNInfoAllocator()); + VN->setHasPHIKill(true); + LiveRange LR( + SlotIndex(getInstructionIndex(startInst).getRegSlot()), + getMBBEndIdx(startInst->getParent()), VN); + Interval.addRange(LR); - MachineInstr *fmi = isSS ? 
tii_->foldMemoryOperand(MI, FoldOps, Slot) - : tii_->foldMemoryOperand(MI, FoldOps, DefMI); - if (fmi) { - // Remember this instruction uses the spill slot. - if (isSS) vrm.addSpillSlotUse(Slot, fmi); - - // Attempt to fold the memory reference into the instruction. If - // we can do this, we don't need to insert spill code. - if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot)) - vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo); - vrm.transferSpillPts(MI, fmi); - vrm.transferRestorePts(MI, fmi); - vrm.transferEmergencySpills(MI, fmi); - ReplaceMachineInstrInMaps(MI, fmi); - MI->eraseFromParent(); - MI = fmi; - ++numFolds; - return true; - } - return false; + return LR; } -/// canFoldMemoryOperand - Returns true if the specified load / store -/// folding is possible. -bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI, - SmallVector &Ops, - bool ReMat) const { - // Filter the list of operand indexes that are to be folded. Abort if - // any operand will prevent folding. - unsigned MRInfo = 0; - SmallVector FoldOps; - if (FilterFoldedOps(MI, Ops, MRInfo, FoldOps)) + +//===----------------------------------------------------------------------===// +// Register mask functions +//===----------------------------------------------------------------------===// + +bool LiveIntervals::checkRegMaskInterference(LiveInterval &LI, + BitVector &UsableRegs) { + if (LI.empty()) return false; + LiveInterval::iterator LiveI = LI.begin(), LiveE = LI.end(); + + // Use a smaller arrays for local live ranges. + ArrayRef Slots; + ArrayRef Bits; + if (MachineBasicBlock *MBB = intervalIsInOneMBB(LI)) { + Slots = getRegMaskSlotsInBlock(MBB->getNumber()); + Bits = getRegMaskBitsInBlock(MBB->getNumber()); + } else { + Slots = getRegMaskSlots(); + Bits = getRegMaskBits(); + } - // It's only legal to remat for a use, not a def. - if (ReMat && (MRInfo & VirtRegMap::isMod)) + // We are going to enumerate all the register mask slots contained in LI. + // Start with a binary search of RegMaskSlots to find a starting point. + ArrayRef::iterator SlotI = + std::lower_bound(Slots.begin(), Slots.end(), LiveI->start); + ArrayRef::iterator SlotE = Slots.end(); + + // No slots in range, LI begins after the last call. + if (SlotI == SlotE) return false; - return tii_->canFoldMemoryOperand(MI, FoldOps); + bool Found = false; + for (;;) { + assert(*SlotI >= LiveI->start); + // Loop over all slots overlapping this segment. + while (*SlotI < LiveI->end) { + // *SlotI overlaps LI. Collect mask bits. + if (!Found) { + // This is the first overlap. Initialize UsableRegs to all ones. + UsableRegs.clear(); + UsableRegs.resize(tri_->getNumRegs(), true); + Found = true; + } + // Remove usable registers clobbered by this mask. + UsableRegs.clearBitsNotInMask(Bits[SlotI-Slots.begin()]); + if (++SlotI == SlotE) + return Found; + } + // *SlotI is beyond the current LI segment. + LiveI = LI.advanceTo(LiveI, *SlotI); + if (LiveI == LiveE) + return Found; + // Advance SlotI until it overlaps. + while (*SlotI < LiveI->start) + if (++SlotI == SlotE) + return Found; + } } -bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const { - LiveInterval::Ranges::const_iterator itr = li.ranges.begin(); +//===----------------------------------------------------------------------===// +// IntervalUpdate class. 
+//===----------------------------------------------------------------------===// - MachineBasicBlock *mbb = indexes_->getMBBCoveringRange(itr->start, itr->end); +// HMEditor is a toolkit used by handleMove to trim or extend live intervals. +class LiveIntervals::HMEditor { +private: + LiveIntervals& LIS; + const MachineRegisterInfo& MRI; + const TargetRegisterInfo& TRI; + SlotIndex NewIdx; + + typedef std::pair IntRangePair; + typedef DenseSet RangeSet; + + struct RegRanges { + LiveRange* Use; + LiveRange* EC; + LiveRange* Dead; + LiveRange* Def; + RegRanges() : Use(0), EC(0), Dead(0), Def(0) {} + }; + typedef DenseMap BundleRanges; + +public: + HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI, + const TargetRegisterInfo& TRI, SlotIndex NewIdx) + : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {} + + // Update intervals for all operands of MI from OldIdx to NewIdx. + // This assumes that MI used to be at OldIdx, and now resides at + // NewIdx. + void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) { + assert(NewIdx != OldIdx && "No-op move? That's a bit strange."); + + // Collect the operands. + RangeSet Entering, Internal, Exiting; + bool hasRegMaskOp = false; + collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx); + + // To keep the LiveRanges valid within an interval, move the ranges closest + // to the destination first. This prevents ranges from overlapping, to that + // APIs like removeRange still work. + if (NewIdx < OldIdx) { + moveAllEnteringFrom(OldIdx, Entering); + moveAllInternalFrom(OldIdx, Internal); + moveAllExitingFrom(OldIdx, Exiting); + } + else { + moveAllExitingFrom(OldIdx, Exiting); + moveAllInternalFrom(OldIdx, Internal); + moveAllEnteringFrom(OldIdx, Entering); + } - if (mbb == 0) - return false; + if (hasRegMaskOp) + updateRegMaskSlots(OldIdx); - for (++itr; itr != li.ranges.end(); ++itr) { - MachineBasicBlock *mbb2 = - indexes_->getMBBCoveringRange(itr->start, itr->end); +#ifndef NDEBUG + LIValidator validator; + validator = std::for_each(Entering.begin(), Entering.end(), validator); + validator = std::for_each(Internal.begin(), Internal.end(), validator); + validator = std::for_each(Exiting.begin(), Exiting.end(), validator); + assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness."); +#endif - if (mbb2 != mbb) - return false; } - return true; -} + // Update intervals for all operands of MI to refer to BundleStart's + // SlotIndex. + void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) { + if (MI == BundleStart) + return; // Bundling instr with itself - nothing to do. + + SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI); + assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI && + "SlotIndex <-> Instruction mapping broken for MI"); + + // Collect all ranges already in the bundle. + MachineBasicBlock::instr_iterator BII(BundleStart); + RangeSet Entering, Internal, Exiting; + bool hasRegMaskOp = false; + collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx); + assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); + for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) { + if (&*BII == MI) + continue; + collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx); + assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); + } -/// rewriteImplicitOps - Rewrite implicit use operands of MI (i.e. uses of -/// interval on to-be re-materialized operands of MI) with new register. 
-void LiveIntervals::rewriteImplicitOps(const LiveInterval &li, - MachineInstr *MI, unsigned NewVReg, - VirtRegMap &vrm) { - // There is an implicit use. That means one of the other operand is - // being remat'ed and the remat'ed instruction has li.reg as an - // use operand. Make sure we rewrite that as well. - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - MachineOperand &MO = MI->getOperand(i); - if (!MO.isReg()) - continue; - unsigned Reg = MO.getReg(); - if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg)) - continue; - if (!vrm.isReMaterialized(Reg)) - continue; - MachineInstr *ReMatMI = vrm.getReMaterializedMI(Reg); - MachineOperand *UseMO = ReMatMI->findRegisterUseOperand(li.reg); - if (UseMO) - UseMO->setReg(NewVReg); - } -} + BundleRanges BR = createBundleRanges(Entering, Internal, Exiting); -/// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions -/// for addIntervalsForSpills to rewrite uses / defs for the given live range. -bool LiveIntervals:: -rewriteInstructionForSpills(const LiveInterval &li, const VNInfo *VNI, - bool TrySplit, SlotIndex index, SlotIndex end, - MachineInstr *MI, - MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, - unsigned Slot, int LdSlot, - bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, - VirtRegMap &vrm, - const TargetRegisterClass* rc, - SmallVector &ReMatIds, - const MachineLoopInfo *loopInfo, - unsigned &NewVReg, unsigned ImpUse, bool &HasDef, bool &HasUse, - DenseMap &MBBVRegsMap, - std::vector &NewLIs) { - bool CanFold = false; - RestartInstruction: - for (unsigned i = 0; i != MI->getNumOperands(); ++i) { - MachineOperand& mop = MI->getOperand(i); - if (!mop.isReg()) - continue; - unsigned Reg = mop.getReg(); - if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg)) - continue; - if (Reg != li.reg) - continue; + collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx); + assert(!hasRegMaskOp && "Can't have RegMask operand in bundle."); - bool TryFold = !DefIsReMat; - bool FoldSS = true; // Default behavior unless it's a remat. - int FoldSlot = Slot; - if (DefIsReMat) { - // If this is the rematerializable definition MI itself and - // all of its uses are rematerialized, simply delete it. - if (MI == ReMatOrigDefMI && CanDelete) { - DEBUG(dbgs() << "\t\t\t\tErasing re-materializable def: " - << *MI << '\n'); - RemoveMachineInstrFromMaps(MI); - vrm.RemoveMachineInstrFromMaps(MI); - MI->eraseFromParent(); - break; - } + DEBUG(dbgs() << "Entering: " << Entering.size() << "\n"); + DEBUG(dbgs() << "Internal: " << Internal.size() << "\n"); + DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n"); - // If def for this use can't be rematerialized, then try folding. - // If def is rematerializable and it's a load, also try folding. - TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad)); - if (isLoad) { - // Try fold loads (from stack slot, constant pool, etc.) into uses. - FoldSS = isLoadSS; - FoldSlot = LdSlot; - } - } + moveAllEnteringFromInto(OldIdx, Entering, BR); + moveAllInternalFromInto(OldIdx, Internal, BR); + moveAllExitingFromInto(OldIdx, Exiting, BR); - // Scan all of the operands of this instruction rewriting operands - // to use NewVReg instead of li.reg as appropriate. We do this for - // two reasons: - // - // 1. If the instr reads the same spilled vreg multiple times, we - // want to reuse the NewVReg. - // 2. If the instr is a two-addr instruction, we are required to - // keep the src/dst regs pinned. 
- // - // Keep track of whether we replace a use and/or def so that we can - // create the spill interval with the appropriate range. - SmallVector Ops; - tie(HasUse, HasDef) = MI->readsWritesVirtualRegister(Reg, &Ops); - - // Create a new virtual register for the spill interval. - // Create the new register now so we can map the fold instruction - // to the new register so when it is unfolded we get the correct - // answer. - bool CreatedNewVReg = false; - if (NewVReg == 0) { - NewVReg = mri_->createVirtualRegister(rc); - vrm.grow(); - CreatedNewVReg = true; - - // The new virtual register should get the same allocation hints as the - // old one. - std::pair Hint = mri_->getRegAllocationHint(Reg); - if (Hint.first || Hint.second) - mri_->setRegAllocationHint(NewVReg, Hint.first, Hint.second); - } - if (!TryFold) - CanFold = false; - else { - // Do not fold load / store here if we are splitting. We'll find an - // optimal point to insert a load / store later. - if (!TrySplit) { - if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index, - Ops, FoldSS, FoldSlot, NewVReg)) { - // Folding the load/store can completely change the instruction in - // unpredictable ways, rescan it from the beginning. - - if (FoldSS) { - // We need to give the new vreg the same stack slot as the - // spilled interval. - vrm.assignVirt2StackSlot(NewVReg, FoldSlot); - } - - HasUse = false; - HasDef = false; - CanFold = false; - if (isNotInMIMap(MI)) - break; - goto RestartInstruction; - } - } else { - // We'll try to fold it later if it's profitable. - CanFold = canFoldMemoryOperand(MI, Ops, DefIsReMat); - } - } - - mop.setReg(NewVReg); - if (mop.isImplicit()) - rewriteImplicitOps(li, MI, NewVReg, vrm); +#ifndef NDEBUG + LIValidator validator; + validator = std::for_each(Entering.begin(), Entering.end(), validator); + validator = std::for_each(Internal.begin(), Internal.end(), validator); + validator = std::for_each(Exiting.begin(), Exiting.end(), validator); + assert(validator.rangesOk() && "moveAllOperandsInto broke liveness."); +#endif + } - // Reuse NewVReg for other reads. - for (unsigned j = 0, e = Ops.size(); j != e; ++j) { - MachineOperand &mopj = MI->getOperand(Ops[j]); - mopj.setReg(NewVReg); - if (mopj.isImplicit()) - rewriteImplicitOps(li, MI, NewVReg, vrm); - } +private: - if (CreatedNewVReg) { - if (DefIsReMat) { - vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI); - if (ReMatIds[VNI->id] == VirtRegMap::MAX_STACK_SLOT) { - // Each valnum may have its own remat id. - ReMatIds[VNI->id] = vrm.assignVirtReMatId(NewVReg); - } else { - vrm.assignVirtReMatId(NewVReg, ReMatIds[VNI->id]); - } - if (!CanDelete || (HasUse && HasDef)) { - // If this is a two-addr instruction then its use operands are - // rematerializable but its def is not. It should be assigned a - // stack slot. 
- vrm.assignVirt2StackSlot(NewVReg, Slot); - } - } else { - vrm.assignVirt2StackSlot(NewVReg, Slot); +#ifndef NDEBUG + class LIValidator { + private: + DenseSet Checked, Bogus; + public: + void operator()(const IntRangePair& P) { + const LiveInterval* LI = P.first; + if (Checked.count(LI)) + return; + Checked.insert(LI); + if (LI->empty()) + return; + SlotIndex LastEnd = LI->begin()->start; + for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end(); + LRI != LRE; ++LRI) { + const LiveRange& LR = *LRI; + if (LastEnd > LR.start || LR.start >= LR.end) + Bogus.insert(LI); + LastEnd = LR.end; } - } else if (HasUse && HasDef && - vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) { - // If this interval hasn't been assigned a stack slot (because earlier - // def is a deleted remat def), do it now. - assert(Slot != VirtRegMap::NO_STACK_SLOT); - vrm.assignVirt2StackSlot(NewVReg, Slot); } - // Re-matting an instruction with virtual register use. Add the - // register as an implicit use on the use MI. - if (DefIsReMat && ImpUse) - MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true)); - - // Create a new register interval for this spill / remat. - LiveInterval &nI = getOrCreateInterval(NewVReg); - if (CreatedNewVReg) { - NewLIs.push_back(&nI); - MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg)); - if (TrySplit) - vrm.setIsSplitFromReg(NewVReg, li.reg); + bool rangesOk() const { + return Bogus.empty(); } + }; +#endif - if (HasUse) { - if (CreatedNewVReg) { - LiveRange LR(index.getLoadIndex(), index.getDefIndex(), - nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator)); - DEBUG(dbgs() << " +" << LR); - nI.addRange(LR); - } else { - // Extend the split live interval to this def / use. - SlotIndex End = index.getDefIndex(); - LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End, - nI.getValNumInfo(nI.getNumValNums()-1)); - DEBUG(dbgs() << " +" << LR); - nI.addRange(LR); + // Collect IntRangePairs for all operands of MI that may need fixing. + // Treat's MI's index as OldIdx (regardless of what it is in SlotIndexes' + // maps). + void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal, + RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) { + hasRegMaskOp = false; + for (MachineInstr::mop_iterator MOI = MI->operands_begin(), + MOE = MI->operands_end(); + MOI != MOE; ++MOI) { + const MachineOperand& MO = *MOI; + + if (MO.isRegMask()) { + hasRegMaskOp = true; + continue; } - } - if (HasDef) { - LiveRange LR(index.getDefIndex(), index.getStoreIndex(), - nI.getNextValue(SlotIndex(), 0, false, VNInfoAllocator)); - DEBUG(dbgs() << " +" << LR); - nI.addRange(LR); - } - DEBUG({ - dbgs() << "\t\t\t\tAdded new interval: "; - nI.print(dbgs(), tri_); - dbgs() << '\n'; - }); - } - return CanFold; -} -bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li, - const VNInfo *VNI, - MachineBasicBlock *MBB, - SlotIndex Idx) const { - return li.killedInRange(Idx.getNextSlot(), getMBBEndIdx(MBB)); -} + if (!MO.isReg() || MO.getReg() == 0) + continue; -/// RewriteInfo - Keep track of machine instrs that will be rewritten -/// during spilling. 
-namespace { - struct RewriteInfo { - SlotIndex Index; - MachineInstr *MI; - RewriteInfo(SlotIndex i, MachineInstr *mi) : Index(i), MI(mi) {} - }; + unsigned Reg = MO.getReg(); - struct RewriteInfoCompare { - bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const { - return LHS.Index < RHS.Index; - } - }; -} + // TODO: Currently we're skipping uses that are reserved or have no + // interval, but we're not updating their kills. This should be + // fixed. + if (!LIS.hasInterval(Reg) || + (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg))) + continue; -void LiveIntervals:: -rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit, - LiveInterval::Ranges::const_iterator &I, - MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI, - unsigned Slot, int LdSlot, - bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete, - VirtRegMap &vrm, - const TargetRegisterClass* rc, - SmallVector &ReMatIds, - const MachineLoopInfo *loopInfo, - BitVector &SpillMBBs, - DenseMap > &SpillIdxes, - BitVector &RestoreMBBs, - DenseMap > &RestoreIdxes, - DenseMap &MBBVRegsMap, - std::vector &NewLIs) { - bool AllCanFold = true; - unsigned NewVReg = 0; - SlotIndex start = I->start.getBaseIndex(); - SlotIndex end = I->end.getPrevSlot().getBaseIndex().getNextIndex(); - - // First collect all the def / use in this live range that will be rewritten. - // Make sure they are sorted according to instruction index. - std::vector RewriteMIs; - for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg), - re = mri_->reg_end(); ri != re; ) { - MachineInstr *MI = &*ri; - MachineOperand &O = ri.getOperand(); - ++ri; - if (MI->isDebugValue()) { - // Modify DBG_VALUE now that the value is in a spill slot. - if (Slot != VirtRegMap::MAX_STACK_SLOT || isLoadSS) { - uint64_t Offset = MI->getOperand(1).getImm(); - const MDNode *MDPtr = MI->getOperand(2).getMetadata(); - DebugLoc DL = MI->getDebugLoc(); - int FI = isLoadSS ? 
LdSlot : (int)Slot; - if (MachineInstr *NewDV = tii_->emitFrameIndexDebugValue(*mf_, FI, - Offset, MDPtr, DL)) { - DEBUG(dbgs() << "Modifying debug info due to spill:" << "\t" << *MI); - ReplaceMachineInstrInMaps(MI, NewDV); - MachineBasicBlock *MBB = MI->getParent(); - MBB->insert(MBB->erase(MI), NewDV); - continue; + LiveInterval* LI = &LIS.getInterval(Reg); + + if (MO.readsReg()) { + LiveRange* LR = LI->getLiveRangeContaining(OldIdx); + if (LR != 0) + Entering.insert(std::make_pair(LI, LR)); + } + if (MO.isDef()) { + if (MO.isEarlyClobber()) { + LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot(true)); + assert(LR != 0 && "No EC range?"); + if (LR->end > OldIdx.getDeadSlot()) + Exiting.insert(std::make_pair(LI, LR)); + else + Internal.insert(std::make_pair(LI, LR)); + } else if (MO.isDead()) { + LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot()); + assert(LR != 0 && "No dead-def range?"); + Internal.insert(std::make_pair(LI, LR)); + } else { + LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getDeadSlot()); + assert(LR && LR->end > OldIdx.getDeadSlot() && + "Non-dead-def should have live range exiting."); + Exiting.insert(std::make_pair(LI, LR)); } } - - DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI); - RemoveMachineInstrFromMaps(MI); - vrm.RemoveMachineInstrFromMaps(MI); - MI->eraseFromParent(); - continue; } - assert(!(O.isImplicit() && O.isUse()) && - "Spilling register that's used as implicit use?"); - SlotIndex index = getInstructionIndex(MI); - if (index < start || index >= end) - continue; - - if (O.isUndef()) - // Must be defined by an implicit def. It should not be spilled. Note, - // this is for correctness reason. e.g. - // 8 %reg1024 = IMPLICIT_DEF - // 12 %reg1024 = INSERT_SUBREG %reg1024, %reg1025, 2 - // The live range [12, 14) are not part of the r1024 live interval since - // it's defined by an implicit def. It will not conflicts with live - // interval of r1025. Now suppose both registers are spilled, you can - // easily see a situation where both registers are reloaded before - // the INSERT_SUBREG and both target registers that would overlap. - continue; - RewriteMIs.push_back(RewriteInfo(index, MI)); } - std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare()); - - unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0; - // Now rewrite the defs and uses. - for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) { - RewriteInfo &rwi = RewriteMIs[i]; - ++i; - SlotIndex index = rwi.Index; - MachineInstr *MI = rwi.MI; - // If MI def and/or use the same register multiple times, then there - // are multiple entries. - while (i != e && RewriteMIs[i].MI == MI) { - assert(RewriteMIs[i].Index == index); - ++i; - } - MachineBasicBlock *MBB = MI->getParent(); - if (ImpUse && MI != ReMatDefMI) { - // Re-matting an instruction with virtual register use. Prevent interval - // from being spilled. - getInterval(ImpUse).markNotSpillable(); - } + // Collect IntRangePairs for all operands of MI that may need fixing. 
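+  // Entering ranges are live-in at MIStartIdx; Exiting ranges are live past
+  // MIEndIdx. Bundles cannot contain early-clobber or dead defs, so no
+  // Internal set is needed here.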
+  // Collect IntRangePairs for all operands of MI that may need fixing.
+  void collectRangesInBundle(MachineInstr* MI, RangeSet& Entering,
+                             RangeSet& Exiting, SlotIndex MIStartIdx,
+                             SlotIndex MIEndIdx) {
+    for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
+                                    MOE = MI->operands_end();
+         MOI != MOE; ++MOI) {
+      const MachineOperand& MO = *MOI;
+      assert(!MO.isRegMask() && "Can't have RegMasks in bundles.");
+      if (!MO.isReg() || MO.getReg() == 0)
+        continue;
-    unsigned MBBId = MBB->getNumber();
-    unsigned ThisVReg = 0;
-    if (TrySplit) {
-      DenseMap<unsigned,unsigned>::iterator NVI = MBBVRegsMap.find(MBBId);
-      if (NVI != MBBVRegsMap.end()) {
-        ThisVReg = NVI->second;
-        // One common case:
-        // x = use
-        // ...
-        // ...
-        // def = ...
-        //     = use
-        // It's better to start a new interval to avoid artificially
-        // extending the new interval.
-        if (MI->readsWritesVirtualRegister(li.reg) ==
-            std::make_pair(false,true)) {
-          MBBVRegsMap.erase(MBB->getNumber());
-          ThisVReg = 0;
-        }
-      }
-    }
+      unsigned Reg = MO.getReg();
+
+      // TODO: Currently we're skipping uses that are reserved or have no
+      // interval, but we're not updating their kills. This should be
+      // fixed.
+      if (!LIS.hasInterval(Reg) ||
+          (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
+        continue;
-
-    bool IsNew = ThisVReg == 0;
-    if (IsNew) {
-      // This ends the previous live interval. If all of its def / use
-      // can be folded, give it a low spill weight.
-      if (NewVReg && TrySplit && AllCanFold) {
-        LiveInterval &nI = getOrCreateInterval(NewVReg);
-        nI.weight /= 10.0F;
+      LiveInterval* LI = &LIS.getInterval(Reg);
+
+      if (MO.readsReg()) {
+        LiveRange* LR = LI->getLiveRangeContaining(MIStartIdx);
+        if (LR != 0)
+          Entering.insert(std::make_pair(LI, LR));
+      }
+      if (MO.isDef()) {
+        assert(!MO.isEarlyClobber() &&
+               "Early clobbers not allowed in bundles.");
+        assert(!MO.isDead() && "Dead-defs not allowed in bundles.");
+        LiveRange* LR = LI->getLiveRangeContaining(MIEndIdx.getDeadSlot());
+        assert(LR != 0 && "Internal ranges not allowed in bundles.");
+        Exiting.insert(std::make_pair(LI, LR));
+      }
-      AllCanFold = true;
-    }
-    NewVReg = ThisVReg;
-
-    bool HasDef = false;
-    bool HasUse = false;
-    bool CanFold = rewriteInstructionForSpills(li, I->valno, TrySplit,
-                         index, end, MI, ReMatOrigDefMI, ReMatDefMI,
-                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
-                         CanDelete, vrm, rc, ReMatIds, loopInfo, NewVReg,
-                         ImpUse, HasDef, HasUse, MBBVRegsMap, NewLIs);
-    if (!HasDef && !HasUse)
-      continue;
+    }
+  }
-
-    AllCanFold &= CanFold;
+  BundleRanges createBundleRanges(RangeSet& Entering,
+                                  RangeSet& Internal,
+                                  RangeSet& Exiting) {
+    BundleRanges BR;
-
-    // Update weight of spill interval.
-    LiveInterval &nI = getOrCreateInterval(NewVReg);
-    if (!TrySplit) {
-      // The spill weight is now infinity as it cannot be spilled again.
-      nI.markNotSpillable();
-      continue;
+    for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+         EI != EE; ++EI) {
+      LiveInterval* LI = EI->first;
+      LiveRange* LR = EI->second;
+      BR[LI->reg].Use = LR;
+    }
-    }
-
-    // Keep track of the last def and first use in each MBB.
-    if (HasDef) {
-      if (MI != ReMatOrigDefMI || !CanDelete) {
-        bool HasKill = false;
-        if (!HasUse)
-          HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, index.getDefIndex());
-        else {
-          // If this is a two-address code, then this index starts a new VNInfo.
-          const VNInfo *VNI = li.findDefinedVNInfoForRegInt(index.getDefIndex());
-          if (VNI)
-            HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, index.getDefIndex());
-        }
-        DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
-          SpillIdxes.find(MBBId);
-        if (!HasKill) {
-          if (SII == SpillIdxes.end()) {
-            std::vector<SRInfo> S;
-            S.push_back(SRInfo(index, NewVReg, true));
-            SpillIdxes.insert(std::make_pair(MBBId, S));
-          } else if (SII->second.back().vreg != NewVReg) {
-            SII->second.push_back(SRInfo(index, NewVReg, true));
-          } else if (index > SII->second.back().index) {
-            // If there is an earlier def and this is a two-address
-            // instruction, then it's not possible to fold the store (which
-            // would also fold the load).
-            SRInfo &Info = SII->second.back();
-            Info.index = index;
-            Info.canFold = !HasUse;
-          }
-          SpillMBBs.set(MBBId);
-        } else if (SII != SpillIdxes.end() &&
-                   SII->second.back().vreg == NewVReg &&
-                   index > SII->second.back().index) {
-          // There is an earlier def that's not killed (must be two-address).
-          // The spill is no longer needed.
-          SII->second.pop_back();
-          if (SII->second.empty()) {
-            SpillIdxes.erase(MBBId);
-            SpillMBBs.reset(MBBId);
-          }
-        }
+    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+         II != IE; ++II) {
+      LiveInterval* LI = II->first;
+      LiveRange* LR = II->second;
+      if (LR->end.isDead()) {
+        BR[LI->reg].Dead = LR;
+      } else {
+        BR[LI->reg].EC = LR;
+      }
+    }
-      }
-    }
-
-    if (HasUse) {
-      DenseMap<unsigned, std::vector<SRInfo> >::iterator SII =
-        SpillIdxes.find(MBBId);
-      if (SII != SpillIdxes.end() &&
-          SII->second.back().vreg == NewVReg &&
-          index > SII->second.back().index)
-        // Use(s) following the last def, it's not safe to fold the spill.
-        SII->second.back().canFold = false;
-      DenseMap<unsigned, std::vector<SRInfo> >::iterator RII =
-        RestoreIdxes.find(MBBId);
-      if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
-        // If we are splitting live intervals, only fold if it's the first
-        // use and there isn't another use later in the MBB.
-        RII->second.back().canFold = false;
-      else if (IsNew) {
-        // Only need a reload if there isn't an earlier def / use.
-        if (RII == RestoreIdxes.end()) {
-          std::vector<SRInfo> Infos;
-          Infos.push_back(SRInfo(index, NewVReg, true));
-          RestoreIdxes.insert(std::make_pair(MBBId, Infos));
-        } else {
-          RII->second.push_back(SRInfo(index, NewVReg, true));
-        }
-        RestoreMBBs.set(MBBId);
-      }
-    }
+    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+         EI != EE; ++EI) {
+      LiveInterval* LI = EI->first;
+      LiveRange* LR = EI->second;
+      BR[LI->reg].Def = LR;
+    }
-
-    // Update spill weight.
-    unsigned loopDepth = loopInfo->getLoopDepth(MBB);
-    nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
+    return BR;
+  }
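// [Illustrative sketch, not part of the patch.] BundleRanges, as built by
// createBundleRanges() above, is a per-register summary of the bundle: for
// each register it records at most one entering Use range, one internal
// Dead or EC (early-clobber) range, and one exiting Def range. Assuming the
// three RangeSets were filled by collectRangesInBundle():
//
//   BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);
//   // BR[Reg].Use  - range entering the bundle as a use, or 0
//   // BR[Reg].Def  - range leaving the bundle as a definition, or 0
//   // BR[Reg].Dead - definition that dies inside the bundle, or 0
//   // BR[Reg].EC   - early-clobber definition internal to the bundle, or 0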
-  }
-
-  if (NewVReg && TrySplit && AllCanFold) {
-    // If all of its def / use can be folded, give it a low spill weight.
-    LiveInterval &nI = getOrCreateInterval(NewVReg);
-    nI.weight /= 10.0F;
-  }
+
+  void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
+    MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
+    if (!OldKillMI->killsRegister(reg))
+      return; // Bail out if we don't have kill flags on the old register.
+    MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
+    assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
+    assert(!NewKillMI->killsRegister(reg) &&
+           "New kill instr is already a kill.");
+    OldKillMI->clearRegisterKills(reg, &TRI);
+    NewKillMI->addRegisterKilled(reg, &TRI);
+  }
-}
-
-bool LiveIntervals::alsoFoldARestore(int Id, SlotIndex index,
-                                     unsigned vr, BitVector &RestoreMBBs,
-                     DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes) {
-  if (!RestoreMBBs[Id])
-    return false;
-  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
-  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
-    if (Restores[i].index == index &&
-        Restores[i].vreg == vr &&
-        Restores[i].canFold)
-      return true;
-  return false;
-}
-
-void LiveIntervals::eraseRestoreInfo(int Id, SlotIndex index,
-                                     unsigned vr, BitVector &RestoreMBBs,
-                     DenseMap<unsigned, std::vector<SRInfo> > &RestoreIdxes) {
-  if (!RestoreMBBs[Id])
-    return;
-  std::vector<SRInfo> &Restores = RestoreIdxes[Id];
-  for (unsigned i = 0, e = Restores.size(); i != e; ++i)
-    if (Restores[i].index == index && Restores[i].vreg == vr)
-      Restores[i].index = SlotIndex();
-}
+
+  void updateRegMaskSlots(SlotIndex OldIdx) {
+    SmallVectorImpl<SlotIndex>::iterator RI =
+      std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
+                       OldIdx);
+    assert(*RI == OldIdx && "No RegMask at OldIdx.");
+    *RI = NewIdx;
+    assert(*prior(RI) < *RI && *RI < *next(RI) &&
+           "RegSlots out of order. Did you move one call across another?");
+  }
-
-/// handleSpilledImpDefs - Remove IMPLICIT_DEF instructions which are being
-/// spilled and create empty intervals for their uses.
-void
-LiveIntervals::handleSpilledImpDefs(const LiveInterval &li, VirtRegMap &vrm,
-                                    const TargetRegisterClass* rc,
-                                    std::vector<LiveInterval*> &NewLIs) {
-  for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
-         re = mri_->reg_end(); ri != re; ) {
-    MachineOperand &O = ri.getOperand();
-    MachineInstr *MI = &*ri;
-    ++ri;
-    if (MI->isDebugValue()) {
-      // Remove debug info for now.
-      O.setReg(0U);
-      DEBUG(dbgs() << "Removing debug info due to spill:" << "\t" << *MI);
-      continue;
-    }
-    if (O.isDef()) {
-      assert(MI->isImplicitDef() &&
-             "Register def was not rewritten?");
-      RemoveMachineInstrFromMaps(MI);
-      vrm.RemoveMachineInstrFromMaps(MI);
-      MI->eraseFromParent();
-    } else {
-      // This must be a use of an implicit_def so it's not part of the live
-      // interval. Create a new empty live interval for it.
-      // FIXME: Can we simply erase some of the instructions? e.g. Stores?
-      unsigned NewVReg = mri_->createVirtualRegister(rc);
-      vrm.grow();
-      vrm.setIsImplicitlyDefined(NewVReg);
-      NewLIs.push_back(&getOrCreateInterval(NewVReg));
-      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
-        MachineOperand &MO = MI->getOperand(i);
-        if (MO.isReg() && MO.getReg() == li.reg) {
-          MO.setReg(NewVReg);
-          MO.setIsUndef();
-        }
-      }
+
+  // Return the last use of reg between NewIdx and OldIdx.
+  SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
+    SlotIndex LastUse = NewIdx;
+    for (MachineRegisterInfo::use_nodbg_iterator
+           UI = MRI.use_nodbg_begin(Reg),
+           UE = MRI.use_nodbg_end();
+         UI != UE; UI.skipInstruction()) {
+      const MachineInstr* MI = &*UI;
+      SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
+      if (InstSlot > LastUse && InstSlot < OldIdx)
+        LastUse = InstSlot;
+    }
+    return LastUse;
+  }
-    }
-  }
-}
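// [Illustrative sketch, not part of the patch.] findLastUseBefore() clamps
// its answer to the open interval (NewIdx, OldIdx): uses at or before NewIdx
// and at or after OldIdx are ignored, so a result equal to NewIdx means the
// moved instruction itself is now the last user. That is exactly the shrink
// moveEnteringUpFrom() performs below (variables assumed):
//
//   SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
//   if (LastUse != NewIdx)
//     moveKillFlags(LI->reg, NewIdx, LastUse); // kill moves to earlier user
//   LR->end = LastUse.getRegSlot();            // range now ends at that use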
-
-float
-LiveIntervals::getSpillWeight(bool isDef, bool isUse, unsigned loopDepth) {
-  // Limit the loop depth ridiculousness.
-  if (loopDepth > 200)
-    loopDepth = 200;
-
-  // The loop depth is used to roughly estimate the number of times the
-  // instruction is executed. Something like 10^d is simple, but will quickly
-  // overflow a float. This expression behaves like 10^d for small d, but is
-  // more tempered for large d. At d=200 we get 6.7e33 which leaves a bit of
-  // headroom before overflow.
-  float lc = std::pow(1 + (100.0f / (loopDepth+10)), (float)loopDepth);
-
-  return (isDef + isUse) * lc;
-}
-
-void
-LiveIntervals::normalizeSpillWeights(std::vector<LiveInterval*> &NewLIs) {
-  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i)
-    normalizeSpillWeight(*NewLIs[i]);
-}
+
+  void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+    bool LiveThrough = LR->end > OldIdx.getRegSlot();
+    if (LiveThrough)
+      return;
+    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
+    if (LastUse != NewIdx)
+      moveKillFlags(LI->reg, NewIdx, LastUse);
+    LR->end = LastUse.getRegSlot();
+  }
-
-std::vector<LiveInterval*> LiveIntervals::
-addIntervalsForSpills(const LiveInterval &li,
-                      SmallVectorImpl<LiveInterval*> &SpillIs,
-                      const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
-  assert(li.isSpillable() && "attempt to spill already spilled interval!");
-
-  DEBUG({
-      dbgs() << "\t\t\t\tadding intervals for spills for interval: ";
-      li.print(dbgs(), tri_);
-      dbgs() << '\n';
-    });
-
-  // Each bit specifies whether a spill is required in the MBB.
-  BitVector SpillMBBs(mf_->getNumBlockIDs());
-  DenseMap<unsigned, std::vector<SRInfo> > SpillIdxes;
-  BitVector RestoreMBBs(mf_->getNumBlockIDs());
-  DenseMap<unsigned, std::vector<SRInfo> > RestoreIdxes;
-  DenseMap<unsigned,unsigned> MBBVRegsMap;
-  std::vector<LiveInterval*> NewLIs;
-  const TargetRegisterClass* rc = mri_->getRegClass(li.reg);
-
-  unsigned NumValNums = li.getNumValNums();
-  SmallVector<MachineInstr*, 4> ReMatDefs;
-  ReMatDefs.resize(NumValNums, NULL);
-  SmallVector<MachineInstr*, 4> ReMatOrigDefs;
-  ReMatOrigDefs.resize(NumValNums, NULL);
-  SmallVector<int, 4> ReMatIds;
-  ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
-  BitVector ReMatDelete(NumValNums);
-  unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
-
-  // Spilling a split live interval. It cannot be split any further. Also,
-  // it's guaranteed to be a single val# / range interval.
-  if (vrm.getPreSplitReg(li.reg)) {
-    vrm.setIsSplitFromReg(li.reg, 0);
-    // Unset the split kill marker on the last use.
-    SlotIndex KillIdx = vrm.getKillPoint(li.reg);
-    if (KillIdx != SlotIndex()) {
-      MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
-      assert(KillMI && "Last use disappeared?");
-      int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
-      assert(KillOp != -1 && "Last use disappeared?");
-      KillMI->getOperand(KillOp).setIsKill(false);
-    }
-    vrm.removeKillPoint(li.reg);
-    bool DefIsReMat = vrm.isReMaterialized(li.reg);
-    Slot = vrm.getStackSlot(li.reg);
-    assert(Slot != VirtRegMap::MAX_STACK_SLOT);
-    MachineInstr *ReMatDefMI = DefIsReMat ?
-      vrm.getReMaterializedMI(li.reg) : NULL;
-    int LdSlot = 0;
-    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
-    bool isLoad = isLoadSS ||
-      (DefIsReMat && (ReMatDefMI->getDesc().canFoldAsLoad()));
-    bool IsFirstRange = true;
-    for (LiveInterval::Ranges::const_iterator
-           I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
-      // If this is a split live interval with multiple ranges, it means there
-      // are two-address instructions that re-defined the value. Only the
-      // first def can be rematerialized!
-      if (IsFirstRange) {
-        // Note ReMatOrigDefMI has already been deleted.
-        rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
-                             Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
-                             false, vrm, rc, ReMatIds, loopInfo,
-                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
-                             MBBVRegsMap, NewLIs);
+  void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+    // Extend the LiveRange if NewIdx is past the end.
+    if (NewIdx > LR->end) {
+      // Move kill flags if OldIdx was not originally the end
+      // (otherwise LR->end points to an invalid slot).
+      if (LR->end.getRegSlot() != OldIdx.getRegSlot()) {
+        assert(LR->end > OldIdx && "LiveRange does not cover original slot");
+        moveKillFlags(LI->reg, LR->end, NewIdx);
+      }
-      } else {
-        rewriteInstructionsForSpills(li, false, I, NULL, 0,
-                             Slot, 0, false, false, false,
-                             false, vrm, rc, ReMatIds, loopInfo,
-                             SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
-                             MBBVRegsMap, NewLIs);
+      LR->end = NewIdx.getRegSlot();
+    }
+  }
-      }
-      IsFirstRange = false;
-    }
-
-    handleSpilledImpDefs(li, vrm, rc, NewLIs);
-    normalizeSpillWeights(NewLIs);
-    return NewLIs;
-  }
-
-  bool TrySplit = !intervalIsInOneMBB(li);
-  if (TrySplit)
-    ++numSplits;
-  bool NeedStackSlot = false;
-  for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
-       i != e; ++i) {
-    const VNInfo *VNI = *i;
-    unsigned VN = VNI->id;
-    if (VNI->isUnused())
-      continue; // Dead val#.
-    // Is the def for the val# rematerializable?
-    MachineInstr *ReMatDefMI = VNI->isDefAccurate()
-      ? getInstructionFromIndex(VNI->def) : 0;
-    bool dummy;
-    if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, SpillIs, dummy)) {
-      // Remember how to remat the def of this val#.
-      ReMatOrigDefs[VN] = ReMatDefMI;
-      // Original def may be modified so we have to make a copy here.
-      MachineInstr *Clone = mf_->CloneMachineInstr(ReMatDefMI);
-      CloneMIs.push_back(Clone);
-      ReMatDefs[VN] = Clone;
+  void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
+    bool GoingUp = NewIdx < OldIdx;
-
-      bool CanDelete = true;
-      if (VNI->hasPHIKill()) {
-        // A kill is a phi node, not all of its uses can be rematerialized.
-        // It must not be deleted.
-        CanDelete = false;
-        // Need a stack slot if there is any live range where uses cannot be
-        // rematerialized.
-        NeedStackSlot = true;
+    if (GoingUp) {
+      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+           EI != EE; ++EI)
+        moveEnteringUpFrom(OldIdx, *EI);
+    } else {
+      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+           EI != EE; ++EI)
+        moveEnteringDownFrom(OldIdx, *EI);
+    }
+  }
-      }
-      if (CanDelete)
-        ReMatDelete.set(VN);
-    } else {
-      // Need a stack slot if there is any live range where uses cannot be
-      // rematerialized.
-      NeedStackSlot = true;
-    }
-  }
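// [Illustrative sketch, not part of the patch.] The GoingUp test above is
// the only thing distinguishing the two entering-range fix-ups: moving an
// instruction up (NewIdx < OldIdx) can only shrink a range that used to end
// at OldIdx, while moving it down can only extend one. Per pair, with
// assumed variables:
//
//   if (NewIdx < OldIdx)
//     moveEnteringUpFrom(OldIdx, P);    // trim LR->end back to the last use
//   else
//     moveEnteringDownFrom(OldIdx, P);  // stretch LR->end down to NewIdx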
-
-  // One stack slot per live interval.
-  if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0) {
-    if (vrm.getStackSlot(li.reg) == VirtRegMap::NO_STACK_SLOT)
-      Slot = vrm.assignVirt2StackSlot(li.reg);
-
-    // This case only occurs when the prealloc splitter has already assigned
-    // a stack slot to this vreg.
-    else
-      Slot = vrm.getStackSlot(li.reg);
-  }
+  void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
+           LR->end <= OldIdx.getDeadSlot() &&
+           "Range should be internal to OldIdx.");
+    LiveRange Tmp(*LR);
+    Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
+    Tmp.valno->def = Tmp.start;
+    Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
+    LI->removeRange(*LR);
+    LI->addRange(Tmp);
+  }
-
-  // Create new intervals and rewrite defs and uses.
-  for (LiveInterval::Ranges::const_iterator
-         I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
-    MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
-    MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
-    bool DefIsReMat = ReMatDefMI != NULL;
-    bool CanDelete = ReMatDelete[I->valno->id];
-    int LdSlot = 0;
-    bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
-    bool isLoad = isLoadSS ||
-      (DefIsReMat && ReMatDefMI->getDesc().canFoldAsLoad());
-    rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
-                         Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
-                         CanDelete, vrm, rc, ReMatIds, loopInfo,
-                         SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
-                         MBBVRegsMap, NewLIs);
-  }
+  void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
+    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+         II != IE; ++II)
+      moveInternalFrom(OldIdx, *II);
+  }
-
-  // Insert spills / restores if we are splitting.
-  if (!TrySplit) {
-    handleSpilledImpDefs(li, vrm, rc, NewLIs);
-    normalizeSpillWeights(NewLIs);
-    return NewLIs;
-  }
+  void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
+    LiveRange* LR = P.second;
+    assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
+           "Range should start in OldIdx.");
+    assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
+    SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
+    LR->start = NewStart;
+    LR->valno->def = NewStart;
+  }
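// [Illustrative sketch, not part of the patch.] The two helpers above treat
// moved ranges differently. An internal range lives entirely within the
// instruction's own slots, so it is copied wholesale to NewIdx; an exiting
// range only has its start (and its value number's def) slid to NewIdx,
// since its downstream users are unaffected by the move:
//
//   Internal: [OldIdx.getRegSlot(), OldIdx.getDeadSlot())
//               -> [NewIdx.getRegSlot(), NewIdx.getDeadSlot())
//   Exiting:  [OldIdx.getRegSlot(), End) -> [NewIdx.getRegSlot(), End)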
-
-  SmallPtrSet<LiveInterval*, 4> AddedKill;
-  SmallVector<unsigned, 2> Ops;
-  if (NeedStackSlot) {
-    int Id = SpillMBBs.find_first();
-    while (Id != -1) {
-      std::vector<SRInfo> &spills = SpillIdxes[Id];
-      for (unsigned i = 0, e = spills.size(); i != e; ++i) {
-        SlotIndex index = spills[i].index;
-        unsigned VReg = spills[i].vreg;
-        LiveInterval &nI = getOrCreateInterval(VReg);
-        bool isReMat = vrm.isReMaterialized(VReg);
-        MachineInstr *MI = getInstructionFromIndex(index);
-        bool CanFold = false;
-        bool FoundUse = false;
-        Ops.clear();
-        if (spills[i].canFold) {
-          CanFold = true;
-          for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
-            MachineOperand &MO = MI->getOperand(j);
-            if (!MO.isReg() || MO.getReg() != VReg)
-              continue;
-
-            Ops.push_back(j);
-            if (MO.isDef())
-              continue;
-            if (isReMat ||
-                (!FoundUse && !alsoFoldARestore(Id, index, VReg,
-                                                RestoreMBBs, RestoreIdxes))) {
-              // MI has two-address uses of the same register. If the use
-              // isn't the first and only use in the BB, then we can't fold
-              // it. FIXME: Move this to rewriteInstructionsForSpills.
-              CanFold = false;
-              break;
-            }
-            FoundUse = true;
-          }
-        }
-        // Fold the store into the def if possible.
-        bool Folded = false;
-        if (CanFold && !Ops.empty()) {
-          if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
-            Folded = true;
-            if (FoundUse) {
-              // Also folded uses, do not issue a load.
-              eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
-              nI.removeRange(index.getLoadIndex(), index.getDefIndex());
-            }
-            nI.removeRange(index.getDefIndex(), index.getStoreIndex());
-          }
-        }
-
-        // Otherwise tell the spiller to issue a spill.
-        if (!Folded) {
-          LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
-          bool isKill = LR->end == index.getStoreIndex();
-          if (!MI->registerDefIsDead(nI.reg))
-            // No need to spill a dead def.
-            vrm.addSpillPoint(VReg, isKill, MI);
-          if (isKill)
-            AddedKill.insert(&nI);
-        }
-      }
-      Id = SpillMBBs.find_next(Id);
-    }
-  }
+  void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
+    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+         EI != EE; ++EI)
+      moveExitingFrom(OldIdx, *EI);
+  }
-
-  int Id = RestoreMBBs.find_first();
-  while (Id != -1) {
-    std::vector<SRInfo> &restores = RestoreIdxes[Id];
-    for (unsigned i = 0, e = restores.size(); i != e; ++i) {
-      SlotIndex index = restores[i].index;
-      if (index == SlotIndex())
-        continue;
-      unsigned VReg = restores[i].vreg;
-      LiveInterval &nI = getOrCreateInterval(VReg);
-      bool isReMat = vrm.isReMaterialized(VReg);
-      MachineInstr *MI = getInstructionFromIndex(index);
-      bool CanFold = false;
-      Ops.clear();
-      if (restores[i].canFold) {
-        CanFold = true;
-        for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
-          MachineOperand &MO = MI->getOperand(j);
-          if (!MO.isReg() || MO.getReg() != VReg)
-            continue;
-
-          if (MO.isDef()) {
-            // If this restore were to be folded, it would have been folded
-            // already.
-            CanFold = false;
-            break;
-          }
-          Ops.push_back(j);
-        }
-      }
+  void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P,
+                              BundleRanges& BR) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+    bool LiveThrough = LR->end > OldIdx.getRegSlot();
+    if (LiveThrough) {
+      assert((LR->start < NewIdx || BR[LI->reg].Def == LR) &&
+             "Def in bundle should be def range.");
+      assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
+             "If bundle has use for this reg it should be LR.");
+      BR[LI->reg].Use = LR;
+      return;
+    }
-
-      // Fold the load into the use if possible.
-      bool Folded = false;
-      if (CanFold && !Ops.empty()) {
-        if (!isReMat)
-          Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
-        else {
-          MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
-          int LdSlot = 0;
-          bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
-          // If the rematerializable def is a load, also try to fold it.
-          if (isLoadSS || ReMatDefMI->getDesc().canFoldAsLoad())
-            Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
-                                          Ops, isLoadSS, LdSlot, VReg);
-          if (!Folded) {
-            unsigned ImpUse = getReMatImplicitUse(li, ReMatDefMI);
-            if (ImpUse) {
-              // Re-matting an instruction with virtual register use. Add the
-              // register as an implicit use on the use MI and mark the register
-              // interval as unspillable.
-              LiveInterval &ImpLi = getInterval(ImpUse);
-              ImpLi.markNotSpillable();
-              MI->addOperand(MachineOperand::CreateReg(ImpUse, false, true));
-            }
-          }
-        }
-      }
-      // If folding is not possible / failed, then tell the spiller to issue a
-      // load / rematerialization for us.
-      if (Folded)
-        nI.removeRange(index.getLoadIndex(), index.getDefIndex());
-      else
-        vrm.addRestorePoint(VReg, MI);
+    SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
+    moveKillFlags(LI->reg, OldIdx, LastUse);
+
+    if (LR->start < NewIdx) {
+      // Becoming a new entering range.
+      assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 &&
+             "Bundle shouldn't be re-defining reg mid-range.");
+      assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
+             "Bundle shouldn't have different use range for same reg.");
+      LR->end = LastUse.getRegSlot();
+      BR[LI->reg].Use = LR;
+    } else {
+      // Becoming a new Dead-def.
+      assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) &&
+             "Live range starting at unexpected slot.");
+      assert(BR[LI->reg].Def == LR && "Reg should have def range.");
+      assert(BR[LI->reg].Dead == 0 &&
+             "Can't have def and dead def of same reg in a bundle.");
+      LR->end = LastUse.getDeadSlot();
+      BR[LI->reg].Dead = BR[LI->reg].Def;
+      BR[LI->reg].Def = 0;
+    }
+  }
-      Id = RestoreMBBs.find_next(Id);
-    }
-  }
-
-  // Finalize intervals: add kills, finalize spill weights, and filter out
-  // dead intervals.
-  std::vector<LiveInterval*> RetNewLIs;
-  for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
-    LiveInterval *LI = NewLIs[i];
-    if (!LI->empty()) {
-      if (!AddedKill.count(LI)) {
-        LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
-        SlotIndex LastUseIdx = LR->end.getBaseIndex();
-        MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
-        int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg, false);
-        assert(UseIdx != -1);
-        if (!LastUse->isRegTiedToDefOperand(UseIdx)) {
-          LastUse->getOperand(UseIdx).setIsKill();
-          vrm.addKillPoint(LI->reg, LastUseIdx);
-        }
-      }
-      RetNewLIs.push_back(LI);
+  void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P,
+                                BundleRanges& BR) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+    if (NewIdx > LR->end) {
+      // Range extended to bundle. Add to bundle uses.
+      // Note: Currently adds kill flags to bundle start.
+      assert(BR[LI->reg].Use == 0 &&
+             "Bundle already has use range for reg.");
+      moveKillFlags(LI->reg, LR->end, NewIdx);
+      LR->end = NewIdx.getRegSlot();
+      BR[LI->reg].Use = LR;
+    } else {
+      assert(BR[LI->reg].Use != 0 &&
+             "Bundle should already have a use range for reg.");
+    }
+  }
-    }
-  }
-
-  handleSpilledImpDefs(li, vrm, rc, RetNewLIs);
-  normalizeSpillWeights(RetNewLIs);
-  return RetNewLIs;
-}
-
-/// hasAllocatableSuperReg - Return true if the specified physical register has
-/// any super register that's allocatable.
-bool LiveIntervals::hasAllocatableSuperReg(unsigned Reg) const {
-  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS)
-    if (allocatableRegs_[*AS] && hasInterval(*AS))
-      return true;
-  return false;
-}
+  void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering,
+                               BundleRanges& BR) {
+    bool GoingUp = NewIdx < OldIdx;
-
-/// getRepresentativeReg - Find the largest super register of the specified
-/// physical register.
-unsigned LiveIntervals::getRepresentativeReg(unsigned Reg) const {
-  // Find the largest super-register that is allocatable.
-  unsigned BestReg = Reg;
-  for (const unsigned* AS = tri_->getSuperRegisters(Reg); *AS; ++AS) {
-    unsigned SuperReg = *AS;
-    if (!hasAllocatableSuperReg(SuperReg) && hasInterval(SuperReg)) {
-      BestReg = SuperReg;
-      break;
+    if (GoingUp) {
+      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+           EI != EE; ++EI)
+        moveEnteringUpFromInto(OldIdx, *EI, BR);
+    } else {
+      for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+           EI != EE; ++EI)
+        moveEnteringDownFromInto(OldIdx, *EI, BR);
+    }
+  }
-    }
-  }
-  return BestReg;
-}
-
-/// getNumConflictsWithPhysReg - Return the number of uses and defs of the
-/// specified interval that conflict with the specified physical register.
-unsigned LiveIntervals::getNumConflictsWithPhysReg(const LiveInterval &li,
-                                                   unsigned PhysReg) const {
-  unsigned NumConflicts = 0;
-  const LiveInterval &pli = getInterval(getRepresentativeReg(PhysReg));
-  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
-         E = mri_->reg_end(); I != E; ++I) {
-    MachineOperand &O = I.getOperand();
-    MachineInstr *MI = O.getParent();
-    if (MI->isDebugValue())
-      continue;
-    SlotIndex Index = getInstructionIndex(MI);
-    if (pli.liveAt(Index))
-      ++NumConflicts;
+  void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P,
+                            BundleRanges& BR) {
+    // TODO: Sane rules for moving ranges into bundles.
+  }
-  }
-  return NumConflicts;
-}
-
-/// spillPhysRegAroundRegDefsUses - Spill the specified physical register
-/// around all defs and uses of the specified interval. Return true if it
-/// was able to cut its interval.
-bool LiveIntervals::spillPhysRegAroundRegDefsUses(const LiveInterval &li,
-                                                  unsigned PhysReg,
-                                                  VirtRegMap &vrm) {
-  unsigned SpillReg = getRepresentativeReg(PhysReg);
-
-  for (const unsigned *AS = tri_->getAliasSet(PhysReg); *AS; ++AS)
-    // If there are registers which alias PhysReg, but which are not a
-    // sub-register of the chosen representative super register, assert,
-    // since we can't handle it yet.
-    assert(*AS == SpillReg || !allocatableRegs_[*AS] || !hasInterval(*AS) ||
-           tri_->isSuperRegister(*AS, SpillReg));
-
-  bool Cut = false;
-  SmallVector<unsigned, 4> PRegs;
-  if (hasInterval(SpillReg))
-    PRegs.push_back(SpillReg);
-  else {
-    SmallSet<unsigned, 4> Added;
-    for (const unsigned* AS = tri_->getSubRegisters(SpillReg); *AS; ++AS)
-      if (Added.insert(*AS) && hasInterval(*AS)) {
-        PRegs.push_back(*AS);
-        for (const unsigned* ASS = tri_->getSubRegisters(*AS); *ASS; ++ASS)
-          Added.insert(*ASS);
-      }
+  void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal,
+                               BundleRanges& BR) {
+    for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+         II != IE; ++II)
+      moveInternalFromInto(OldIdx, *II, BR);
+  }
-  }
-
-  SmallPtrSet<MachineInstr*, 32> SeenMIs;
-  for (MachineRegisterInfo::reg_iterator I = mri_->reg_begin(li.reg),
-         E = mri_->reg_end(); I != E; ++I) {
-    MachineOperand &O = I.getOperand();
-    MachineInstr *MI = O.getParent();
-    if (MI->isDebugValue() || SeenMIs.count(MI))
-      continue;
-    SeenMIs.insert(MI);
-    SlotIndex Index = getInstructionIndex(MI);
-    for (unsigned i = 0, e = PRegs.size(); i != e; ++i) {
-      unsigned PReg = PRegs[i];
-      LiveInterval &pli = getInterval(PReg);
-      if (!pli.liveAt(Index))
-        continue;
-      vrm.addEmergencySpill(PReg, MI);
-      SlotIndex StartIdx = Index.getLoadIndex();
-      SlotIndex EndIdx = Index.getNextIndex().getBaseIndex();
-      if (pli.isInOneLiveRange(StartIdx, EndIdx)) {
-        pli.removeRange(StartIdx, EndIdx);
-        Cut = true;
-      } else {
-        std::string msg;
-        raw_string_ostream Msg(msg);
-        Msg << "Ran out of registers during register allocation!";
-        if (MI->isInlineAsm()) {
-          Msg << "\nPlease check your inline asm statement for invalid "
-              << "constraints:\n";
-          MI->print(Msg, tm_);
-        }
-        report_fatal_error(Msg.str());
+  void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P,
+                           BundleRanges& BR) {
+    LiveInterval* LI = P.first;
+    LiveRange* LR = P.second;
+
+    assert(LR->start.isRegister() &&
+           "Don't know how to merge exiting ECs into bundles yet.");
+
+    if (LR->end > NewIdx.getDeadSlot()) {
+      // This range is becoming an exiting range on the bundle.
+      // If there was an old dead-def of this reg, delete it.
+      if (BR[LI->reg].Dead != 0) {
+        LI->removeRange(*BR[LI->reg].Dead);
+        BR[LI->reg].Dead = 0;
+      }
-      }
-      for (const unsigned* AS = tri_->getSubRegisters(PReg); *AS; ++AS) {
-        if (!hasInterval(*AS))
-          continue;
-        LiveInterval &spli = getInterval(*AS);
-        if (spli.liveAt(Index))
-          spli.removeRange(Index.getLoadIndex(),
-                           Index.getNextIndex().getBaseIndex());
+      assert(BR[LI->reg].Def == 0 &&
+             "Can't have two defs for the same variable exiting a bundle.");
+      LR->start = NewIdx.getRegSlot();
+      LR->valno->def = LR->start;
+      BR[LI->reg].Def = LR;
+    } else {
+      // This range is becoming internal to the bundle.
+      assert(LR->end == NewIdx.getRegSlot() &&
+             "Can't bundle def whose kill is before the bundle.");
+      if (BR[LI->reg].Dead || BR[LI->reg].Def) {
+        // Already have a def for this. Just delete the range.
+        LI->removeRange(*LR);
+      } else {
+        // Make the range dead and record it.
+        LR->end = NewIdx.getDeadSlot();
+        BR[LI->reg].Dead = LR;
+        assert(BR[LI->reg].Use == LR &&
+               "Range becoming dead should currently be use.");
+      }
+      // In both cases the range is no longer a use on the bundle.
+      BR[LI->reg].Use = 0;
+    }
+  }
-      }
-    }
-  }
-  return Cut;
-}
-
-LiveRange LiveIntervals::addLiveRangeToEndOfBlock(unsigned reg,
-                                                  MachineInstr* startInst) {
-  LiveInterval& Interval = getOrCreateInterval(reg);
-  VNInfo* VN = Interval.getNextValue(
-    SlotIndex(getInstructionIndex(startInst).getDefIndex()),
-    startInst, true, getVNInfoAllocator());
-  VN->setHasPHIKill(true);
-  LiveRange LR(
-     SlotIndex(getInstructionIndex(startInst).getDefIndex()),
-     getMBBEndIdx(startInst->getParent()), VN);
-  Interval.addRange(LR);
+  void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting,
+                              BundleRanges& BR) {
+    for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+         EI != EE; ++EI)
+      moveExitingFromInto(OldIdx, *EI, BR);
+  }
-
-  return LR;
-}
+};
+
+void LiveIntervals::handleMove(MachineInstr* MI) {
+  SlotIndex OldIndex = indexes_->getInstructionIndex(MI);
+  indexes_->removeMachineInstrFromMaps(MI);
+  SlotIndex NewIndex = MI->isInsideBundle() ?
+    indexes_->getInstructionIndex(MI) :
+    indexes_->insertMachineInstrInMaps(MI);
+  assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
+         OldIndex < getMBBEndIdx(MI->getParent()) &&
+         "Cannot handle moves across basic block boundaries.");
+  assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
+
+  HMEditor HME(*this, *mri_, *tri_, NewIndex);
+  HME.moveAllRangesFrom(MI, OldIndex);
+}
+
+void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI,
+                                         MachineInstr* BundleStart) {
+  SlotIndex NewIndex = indexes_->getInstructionIndex(BundleStart);
+  HMEditor HME(*this, *mri_, *tri_, NewIndex);
+  HME.moveAllRangesInto(MI, BundleStart);
+}
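// [Illustrative sketch, not part of the patch.] A typical client is a
// machine-level pass that re-orders an instruction within its basic block
// and then asks LiveIntervals to repair the bookkeeping. LIS, MI and
// InsertPos below are placeholders for the caller's own values:
//
//   MachineBasicBlock &MBB = *MI->getParent();
//   MBB.remove(MI);              // detach MI from its old position
//   MBB.insert(InsertPos, MI);   // InsertPos must be in the same block
//   LIS->handleMove(MI);         // re-index MI and repair its live ranges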