X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FRegAllocFast.cpp;h=97652036f988ab99e770103af5d9c055bb4e261c;hb=96cc1d0dfbcf9c7ffffc65f0aa008ff532d444f4;hp=110b7fc1fe158e4234ef5315e44d8005648dee23;hpb=4bf4bafcced902ee6d58a90486768f08a3795d02;p=oota-llvm.git diff --git a/lib/CodeGen/RegAllocFast.cpp b/lib/CodeGen/RegAllocFast.cpp index 110b7fc1fe1..97652036f98 100644 --- a/lib/CodeGen/RegAllocFast.cpp +++ b/lib/CodeGen/RegAllocFast.cpp @@ -13,9 +13,11 @@ //===----------------------------------------------------------------------===// #define DEBUG_TYPE "regalloc" +#include "RegisterClassInfo.h" #include "llvm/BasicBlock.h" #include "llvm/CodeGen/MachineFunctionPass.h" #include "llvm/CodeGen/MachineInstr.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineFrameInfo.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/Passes.h" @@ -37,6 +39,7 @@ using namespace llvm; STATISTIC(NumStores, "Number of stores added"); STATISTIC(NumLoads , "Number of loads added"); +STATISTIC(NumCopies, "Number of copies coalesced"); static RegisterRegAlloc fastRegAlloc("fast", "fast register allocator", createFastRegisterAllocator); @@ -45,13 +48,21 @@ namespace { class RAFast : public MachineFunctionPass { public: static char ID; - RAFast() : MachineFunctionPass(&ID), StackSlotForVirtReg(-1) {} + RAFast() : MachineFunctionPass(ID), StackSlotForVirtReg(-1), + isBulkSpilling(false) { + initializePHIEliminationPass(*PassRegistry::getPassRegistry()); + initializeTwoAddressInstructionPassPass(*PassRegistry::getPassRegistry()); + } private: const TargetMachine *TM; MachineFunction *MF; MachineRegisterInfo *MRI; const TargetRegisterInfo *TRI; const TargetInstrInfo *TII; + RegisterClassInfo RegClassInfo; + + // Basic block currently being allocated. + MachineBasicBlock *MBB; // StackSlotForVirtReg - Maps virtual regs to the frame index where these // values are spilled. @@ -65,17 +76,18 @@ namespace { bool Dirty; // Register needs spill. LiveReg(unsigned p=0) : LastUse(0), PhysReg(p), LastOpNum(0), - Dirty(false) { - assert(p && "Don't create LiveRegs without a PhysReg"); - } + Dirty(false) {} }; typedef DenseMap LiveRegMap; + typedef LiveRegMap::value_type LiveRegEntry; // LiveVirtRegs - This map contains entries for each virtual register // that is currently available in a physical register. LiveRegMap LiveVirtRegs; + DenseMap LiveDbgValueMap; + // RegState - Track the state of a physical register. enum RegState { // A disabled register is not available for allocation, but an alias may @@ -87,7 +99,7 @@ namespace { // immediately without checking aliases. regFree, - // A reserved register has been assigned expolicitly (e.g., setting up a + // A reserved register has been assigned explicitly (e.g., setting up a // call parameter), and it remains reserved until it is used. regReserved @@ -103,9 +115,21 @@ namespace { // instruction, and so cannot be allocated. BitVector UsedInInstr; - // ReservedRegs - vector of reserved physical registers. - BitVector ReservedRegs; + // SkippedInstrs - Descriptors of instructions whose clobber list was + // ignored because all registers were spilled. It is still necessary to + // mark all the clobbered registers as used by the function. + SmallPtrSet SkippedInstrs; + // isBulkSpilling - This flag is set when LiveRegMap will be cleared + // completely after spilling all live registers. LiveRegMap entries should + // not be erased. 
+ bool isBulkSpilling; + + enum { + spillClean = 1, + spillDirty = 100, + spillImpossible = ~0u + }; public: virtual const char *getPassName() const { return "Fast Register Allocator"; @@ -120,28 +144,29 @@ namespace { private: bool runOnMachineFunction(MachineFunction &Fn); - void AllocateBasicBlock(MachineBasicBlock &MBB); + void AllocateBasicBlock(); + void handleThroughOperands(MachineInstr *MI, + SmallVectorImpl &VirtDead); int getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC); - void addKillFlag(LiveRegMap::iterator i); - void killVirtReg(LiveRegMap::iterator i); + bool isLastUseOfLocalReg(MachineOperand&); + + void addKillFlag(const LiveReg&); + void killVirtReg(LiveRegMap::iterator); void killVirtReg(unsigned VirtReg); - void spillVirtReg(MachineBasicBlock &MBB, MachineBasicBlock::iterator MI, - unsigned VirtReg, bool isKill); - void killPhysReg(unsigned PhysReg); - void spillPhysReg(MachineBasicBlock &MBB, MachineInstr *I, - unsigned PhysReg, bool isKill); - LiveRegMap::iterator assignVirtToPhysReg(unsigned VirtReg, - unsigned PhysReg); - LiveRegMap::iterator allocVirtReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned VirtReg, unsigned Hint); - unsigned defineVirtReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned OpNum, unsigned VirtReg, unsigned Hint); - unsigned reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned OpNum, unsigned VirtReg, unsigned Hint); - void reservePhysReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned PhysReg); - void spillAll(MachineBasicBlock &MBB, MachineInstr *MI); - void setPhysReg(MachineOperand &MO, unsigned PhysReg); + void spillVirtReg(MachineBasicBlock::iterator MI, LiveRegMap::iterator); + void spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg); + + void usePhysReg(MachineOperand&); + void definePhysReg(MachineInstr *MI, unsigned PhysReg, RegState NewState); + unsigned calcSpillCost(unsigned PhysReg) const; + void assignVirtToPhysReg(LiveRegEntry &LRE, unsigned PhysReg); + void allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint); + LiveRegMap::iterator defineVirtReg(MachineInstr *MI, unsigned OpNum, + unsigned VirtReg, unsigned Hint); + LiveRegMap::iterator reloadVirtReg(MachineInstr *MI, unsigned OpNum, + unsigned VirtReg, unsigned Hint); + void spillAll(MachineInstr *MI); + bool setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg); }; char RAFast::ID = 0; } @@ -163,311 +188,344 @@ int RAFast::getStackSpaceFor(unsigned VirtReg, const TargetRegisterClass *RC) { return FrameIdx; } +/// isLastUseOfLocalReg - Return true if MO is the only remaining reference to +/// its virtual register, and it is guaranteed to be a block-local register. +/// +bool RAFast::isLastUseOfLocalReg(MachineOperand &MO) { + // Check for non-debug uses or defs following MO. + // This is the most likely way to fail - fast path it. + MachineOperand *Next = &MO; + while ((Next = Next->getNextOperandForReg())) + if (!Next->isDebug()) + return false; + + // If the register has ever been spilled or reloaded, we conservatively assume + // it is a global register used in multiple blocks. + if (StackSlotForVirtReg[MO.getReg()] != -1) + return false; + + // Check that the use/def chain has exactly one operand - MO. + return &MRI->reg_nodbg_begin(MO.getReg()).getOperand() == &MO; +} + /// addKillFlag - Set kill flags on last use of a virtual register. 
-void RAFast::addKillFlag(LiveRegMap::iterator lri) { - assert(lri != LiveVirtRegs.end() && "Killing unmapped virtual register"); - const LiveReg &LR = lri->second; - if (LR.LastUse) { - MachineOperand &MO = LR.LastUse->getOperand(LR.LastOpNum); - if (MO.isDef()) - MO.setIsDead(); - else if (!LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) +void RAFast::addKillFlag(const LiveReg &LR) { + if (!LR.LastUse) return; + MachineOperand &MO = LR.LastUse->getOperand(LR.LastOpNum); + if (MO.isUse() && !LR.LastUse->isRegTiedToDefOperand(LR.LastOpNum)) { + if (MO.getReg() == LR.PhysReg) MO.setIsKill(); - DEBUG(dbgs() << " %reg" << lri->first << " killed: " << *LR.LastUse); + else + LR.LastUse->addRegisterKilled(LR.PhysReg, TRI, true); } } /// killVirtReg - Mark virtreg as no longer available. -void RAFast::killVirtReg(LiveRegMap::iterator lri) { - addKillFlag(lri); - const LiveReg &LR = lri->second; - assert(PhysRegState[LR.PhysReg] == lri->first && "Broken RegState mapping"); +void RAFast::killVirtReg(LiveRegMap::iterator LRI) { + addKillFlag(LRI->second); + const LiveReg &LR = LRI->second; + assert(PhysRegState[LR.PhysReg] == LRI->first && "Broken RegState mapping"); PhysRegState[LR.PhysReg] = regFree; - LiveVirtRegs.erase(lri); + // Erase from LiveVirtRegs unless we're spilling in bulk. + if (!isBulkSpilling) + LiveVirtRegs.erase(LRI); } /// killVirtReg - Mark virtreg as no longer available. void RAFast::killVirtReg(unsigned VirtReg) { assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "killVirtReg needs a virtual register"); - DEBUG(dbgs() << " Killing %reg" << VirtReg << "\n"); - LiveRegMap::iterator lri = LiveVirtRegs.find(VirtReg); - if (lri != LiveVirtRegs.end()) - killVirtReg(lri); + LiveRegMap::iterator LRI = LiveVirtRegs.find(VirtReg); + if (LRI != LiveVirtRegs.end()) + killVirtReg(LRI); } /// spillVirtReg - This method spills the value specified by VirtReg into the -/// corresponding stack slot if needed. If isKill is set, the register is also -/// killed. -void RAFast::spillVirtReg(MachineBasicBlock &MBB, - MachineBasicBlock::iterator MI, - unsigned VirtReg, bool isKill) { +/// corresponding stack slot if needed. +void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, unsigned VirtReg) { assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Spilling a physical register is illegal!"); - LiveRegMap::iterator lri = LiveVirtRegs.find(VirtReg); - assert(lri != LiveVirtRegs.end() && "Spilling unmapped virtual register"); - LiveReg &LR = lri->second; - assert(PhysRegState[LR.PhysReg] == VirtReg && "Broken RegState mapping"); + LiveRegMap::iterator LRI = LiveVirtRegs.find(VirtReg); + assert(LRI != LiveVirtRegs.end() && "Spilling unmapped virtual register"); + spillVirtReg(MI, LRI); +} - // If this physreg is used by the instruction, we want to kill it on the - // instruction, not on the spill. - bool spillKill = isKill && LR.LastUse != MI; +/// spillVirtReg - Do the actual work of spilling. +void RAFast::spillVirtReg(MachineBasicBlock::iterator MI, + LiveRegMap::iterator LRI) { + LiveReg &LR = LRI->second; + assert(PhysRegState[LR.PhysReg] == LRI->first && "Broken RegState mapping"); if (LR.Dirty) { + // If this physreg is used by the instruction, we want to kill it on the + // instruction, not on the spill. 
+ bool SpillKill = LR.LastUse != MI; LR.Dirty = false; - DEBUG(dbgs() << " Spilling register " << TRI->getName(LR.PhysReg) - << " containing %reg" << VirtReg); - const TargetRegisterClass *RC = MRI->getRegClass(VirtReg); - int FrameIndex = getStackSpaceFor(VirtReg, RC); - DEBUG(dbgs() << " to stack slot #" << FrameIndex << "\n"); - TII->storeRegToStackSlot(MBB, MI, LR.PhysReg, spillKill, - FrameIndex, RC, TRI); + DEBUG(dbgs() << "Spilling " << PrintReg(LRI->first, TRI) + << " in " << PrintReg(LR.PhysReg, TRI)); + const TargetRegisterClass *RC = MRI->getRegClass(LRI->first); + int FI = getStackSpaceFor(LRI->first, RC); + DEBUG(dbgs() << " to stack slot #" << FI << "\n"); + TII->storeRegToStackSlot(*MBB, MI, LR.PhysReg, SpillKill, FI, RC, TRI); ++NumStores; // Update statistics - if (spillKill) - LR.LastUse = 0; // Don't kill register again - else if (!isKill) { - MachineInstr *Spill = llvm::prior(MI); - LR.LastUse = Spill; - LR.LastOpNum = Spill->findRegisterUseOperandIdx(LR.PhysReg); + // If this register is used by DBG_VALUE then insert new DBG_VALUE to + // identify spilled location as the place to find corresponding variable's + // value. + if (MachineInstr *DBG = LiveDbgValueMap.lookup(LRI->first)) { + const MDNode *MDPtr = + DBG->getOperand(DBG->getNumOperands()-1).getMetadata(); + int64_t Offset = 0; + if (DBG->getOperand(1).isImm()) + Offset = DBG->getOperand(1).getImm(); + DebugLoc DL; + if (MI == MBB->end()) { + // If MI is at basic block end then use last instruction's location. + MachineBasicBlock::iterator EI = MI; + DL = (--EI)->getDebugLoc(); + } + else + DL = MI->getDebugLoc(); + if (MachineInstr *NewDV = + TII->emitFrameIndexDebugValue(*MF, FI, Offset, MDPtr, DL)) { + MachineBasicBlock *MBB = DBG->getParent(); + MBB->insert(MI, NewDV); + DEBUG(dbgs() << "Inserting debug info due to spill:" << "\n" << *NewDV); + LiveDbgValueMap[LRI->first] = NewDV; + } } + if (SpillKill) + LR.LastUse = 0; // Don't kill register again } - - if (isKill) - killVirtReg(lri); + killVirtReg(LRI); } /// spillAll - Spill all dirty virtregs without killing them. -void RAFast::spillAll(MachineBasicBlock &MBB, MachineInstr *MI) { - SmallVector Dirty; - for (LiveRegMap::iterator i = LiveVirtRegs.begin(), - e = LiveVirtRegs.end(); i != e; ++i) - if (i->second.Dirty) - Dirty.push_back(i->first); - for (unsigned i = 0, e = Dirty.size(); i != e; ++i) - spillVirtReg(MBB, MI, Dirty[i], false); +void RAFast::spillAll(MachineInstr *MI) { + if (LiveVirtRegs.empty()) return; + isBulkSpilling = true; + // The LiveRegMap is keyed by an unsigned (the virtreg number), so the order + // of spilling here is deterministic, if arbitrary. + for (LiveRegMap::iterator i = LiveVirtRegs.begin(), e = LiveVirtRegs.end(); + i != e; ++i) + spillVirtReg(MI, i); + LiveVirtRegs.clear(); + isBulkSpilling = false; } -/// killPhysReg - Kill any virtual register aliased by PhysReg. -void RAFast::killPhysReg(unsigned PhysReg) { - // Fast path for the normal case. - switch (unsigned VirtReg = PhysRegState[PhysReg]) { +/// usePhysReg - Handle the direct use of a physical register. +/// Check that the register is not used by a virtreg. +/// Kill the physreg, marking it free. +/// This may add implicit kills to MO->getParent() and invalidate MO. 
+void RAFast::usePhysReg(MachineOperand &MO) { + unsigned PhysReg = MO.getReg(); + assert(TargetRegisterInfo::isPhysicalRegister(PhysReg) && + "Bad usePhysReg operand"); + + switch (PhysRegState[PhysReg]) { case regDisabled: break; - case regFree: - return; case regReserved: PhysRegState[PhysReg] = regFree; + // Fall through + case regFree: + UsedInInstr.set(PhysReg); + MO.setIsKill(); return; default: - killVirtReg(VirtReg); - return; + // The physreg was allocated to a virtual register. That means the value we + // wanted has been clobbered. + llvm_unreachable("Instruction uses an allocated register"); } - // This is a disabled register, we have to check aliases. + // Maybe a superregister is reserved? for (const unsigned *AS = TRI->getAliasSet(PhysReg); unsigned Alias = *AS; ++AS) { - switch (unsigned VirtReg = PhysRegState[Alias]) { + switch (PhysRegState[Alias]) { case regDisabled: - case regFree: break; case regReserved: + assert(TRI->isSuperRegister(PhysReg, Alias) && + "Instruction is not using a subregister of a reserved register"); + // Leave the superregister in the working set. PhysRegState[Alias] = regFree; + UsedInInstr.set(Alias); + MO.getParent()->addRegisterKilled(Alias, TRI, true); + return; + case regFree: + if (TRI->isSuperRegister(PhysReg, Alias)) { + // Leave the superregister in the working set. + UsedInInstr.set(Alias); + MO.getParent()->addRegisterKilled(Alias, TRI, true); + return; + } + // Some other alias was in the working set - clear it. + PhysRegState[Alias] = regDisabled; break; default: - killVirtReg(VirtReg); - break; + llvm_unreachable("Instruction uses an alias of an allocated register"); } } + + // All aliases are disabled, bring register into working set. + PhysRegState[PhysReg] = regFree; + UsedInInstr.set(PhysReg); + MO.setIsKill(); } -/// spillPhysReg - Spill any dirty virtual registers that aliases PhysReg. If -/// isKill is set, they are also killed. -void RAFast::spillPhysReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned PhysReg, bool isKill) { +/// definePhysReg - Mark PhysReg as reserved or free after spilling any +/// virtregs. This is very similar to defineVirtReg except the physreg is +/// reserved instead of allocated. +void RAFast::definePhysReg(MachineInstr *MI, unsigned PhysReg, + RegState NewState) { + UsedInInstr.set(PhysReg); switch (unsigned VirtReg = PhysRegState[PhysReg]) { case regDisabled: break; + default: + spillVirtReg(MI, VirtReg); + // Fall through. case regFree: - return; case regReserved: - if (isKill) - PhysRegState[PhysReg] = regFree; + PhysRegState[PhysReg] = NewState; return; + } + + // This is a disabled register, disable all aliases. + PhysRegState[PhysReg] = NewState; + for (const unsigned *AS = TRI->getAliasSet(PhysReg); + unsigned Alias = *AS; ++AS) { + switch (unsigned VirtReg = PhysRegState[Alias]) { + case regDisabled: + break; + default: + spillVirtReg(MI, VirtReg); + // Fall through. + case regFree: + case regReserved: + PhysRegState[Alias] = regDisabled; + if (TRI->isSuperRegister(PhysReg, Alias)) + return; + break; + } + } +} + + +// calcSpillCost - Return the cost of spilling clearing out PhysReg and +// aliases so it is free for allocation. +// Returns 0 when PhysReg is free or disabled with all aliases disabled - it +// can be allocated directly. +// Returns spillImpossible when PhysReg or an alias can't be spilled. 
+unsigned RAFast::calcSpillCost(unsigned PhysReg) const { + if (UsedInInstr.test(PhysReg)) { + DEBUG(dbgs() << "PhysReg: " << PhysReg << " is already used in instr.\n"); + return spillImpossible; + } + switch (unsigned VirtReg = PhysRegState[PhysReg]) { + case regDisabled: + break; + case regFree: + return 0; + case regReserved: + DEBUG(dbgs() << "VirtReg: " << VirtReg << " corresponding to PhysReg: " + << PhysReg << " is reserved already.\n"); + return spillImpossible; default: - spillVirtReg(MBB, MI, VirtReg, isKill); - return; + return LiveVirtRegs.lookup(VirtReg).Dirty ? spillDirty : spillClean; } - // This is a disabled register, we have to check aliases. + // This is a disabled register, add up cost of aliases. + DEBUG(dbgs() << "\tRegister: " << PhysReg << " is disabled.\n"); + unsigned Cost = 0; for (const unsigned *AS = TRI->getAliasSet(PhysReg); unsigned Alias = *AS; ++AS) { + if (UsedInInstr.test(Alias)) + return spillImpossible; switch (unsigned VirtReg = PhysRegState[Alias]) { case regDisabled: + break; case regFree: + ++Cost; break; case regReserved: - if (isKill) - PhysRegState[Alias] = regFree; - break; + return spillImpossible; default: - spillVirtReg(MBB, MI, VirtReg, isKill); + Cost += LiveVirtRegs.lookup(VirtReg).Dirty ? spillDirty : spillClean; break; } } + return Cost; } + /// assignVirtToPhysReg - This method updates local state so that we know /// that PhysReg is the proper container for VirtReg now. The physical /// register must not be used for anything else when this is called. /// -RAFast::LiveRegMap::iterator -RAFast::assignVirtToPhysReg(unsigned VirtReg, unsigned PhysReg) { - DEBUG(dbgs() << " Assigning %reg" << VirtReg << " to " - << TRI->getName(PhysReg) << "\n"); - PhysRegState[PhysReg] = VirtReg; - return LiveVirtRegs.insert(std::make_pair(VirtReg, PhysReg)).first; +void RAFast::assignVirtToPhysReg(LiveRegEntry &LRE, unsigned PhysReg) { + DEBUG(dbgs() << "Assigning " << PrintReg(LRE.first, TRI) << " to " + << PrintReg(PhysReg, TRI) << "\n"); + PhysRegState[PhysReg] = LRE.first; + assert(!LRE.second.PhysReg && "Already assigned a physreg"); + LRE.second.PhysReg = PhysReg; } /// allocVirtReg - Allocate a physical register for VirtReg. -RAFast::LiveRegMap::iterator RAFast::allocVirtReg(MachineBasicBlock &MBB, - MachineInstr *MI, - unsigned VirtReg, - unsigned Hint) { - const unsigned spillCost = 100; +void RAFast::allocVirtReg(MachineInstr *MI, LiveRegEntry &LRE, unsigned Hint) { + const unsigned VirtReg = LRE.first; + assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Can only allocate virtual registers"); const TargetRegisterClass *RC = MRI->getRegClass(VirtReg); - TargetRegisterClass::iterator AOB = RC->allocation_order_begin(*MF); - TargetRegisterClass::iterator AOE = RC->allocation_order_end(*MF); // Ignore invalid hints. if (Hint && (!TargetRegisterInfo::isPhysicalRegister(Hint) || - !RC->contains(Hint) || UsedInInstr.test(Hint))) + !RC->contains(Hint) || !RegClassInfo.isAllocatable(Hint))) Hint = 0; - // If there is no hint, peek at the first use of this register. - if (!Hint && !MRI->use_nodbg_empty(VirtReg)) { - MachineInstr &MI = *MRI->use_nodbg_begin(VirtReg); - unsigned SrcReg, DstReg, SrcSubReg, DstSubReg; - // Copy to physreg -> use physreg as hint. 
- if (TII->isMoveInstr(MI, SrcReg, DstReg, SrcSubReg, DstSubReg) && - SrcReg == VirtReg && TargetRegisterInfo::isPhysicalRegister(DstReg) && - RC->contains(DstReg) && !UsedInInstr.test(DstReg)) { - Hint = DstReg; - DEBUG(dbgs() << " %reg" << VirtReg << " gets hint from " << MI); - } - } - // Take hint when possible. if (Hint) { - assert(RC->contains(Hint) && !UsedInInstr.test(Hint) && - "Invalid hint should have been cleared"); - switch(PhysRegState[Hint]) { - case regDisabled: - case regReserved: - break; + switch(calcSpillCost(Hint)) { default: - DEBUG(dbgs() << " %reg" << VirtReg << " really wants " - << TRI->getName(Hint) << "\n"); - spillVirtReg(MBB, MI, PhysRegState[Hint], true); + definePhysReg(MI, Hint, regFree); // Fall through. - case regFree: - return assignVirtToPhysReg(VirtReg, Hint); + case 0: + return assignVirtToPhysReg(LRE, Hint); + case spillImpossible: + break; } } + ArrayRef AO = RegClassInfo.getOrder(RC); + // First try to find a completely free register. - unsigned BestCost = 0, BestReg = 0; - bool hasDisabled = false; - for (TargetRegisterClass::iterator I = AOB; I != AOE; ++I) { + for (ArrayRef::iterator I = AO.begin(), E = AO.end(); I != E; ++I) { unsigned PhysReg = *I; - switch(PhysRegState[PhysReg]) { - case regDisabled: - hasDisabled = true; - case regReserved: - continue; - case regFree: - if (!UsedInInstr.test(PhysReg)) - return assignVirtToPhysReg(VirtReg, PhysReg); - continue; - default: - // Grab the first spillable register we meet. - if (!BestReg && !UsedInInstr.test(PhysReg)) - BestReg = PhysReg, BestCost = spillCost; - continue; - } + if (PhysRegState[PhysReg] == regFree && !UsedInInstr.test(PhysReg)) + return assignVirtToPhysReg(LRE, PhysReg); } - DEBUG(dbgs() << " Allocating %reg" << VirtReg << " from " << RC->getName() - << " candidate=" << TRI->getName(BestReg) << "\n"); - - // Try to extend the working set for RC if there were any disabled registers. - if (hasDisabled && (!BestReg || BestCost >= spillCost)) { - for (TargetRegisterClass::iterator I = AOB; I != AOE; ++I) { - unsigned PhysReg = *I; - if (PhysRegState[PhysReg] != regDisabled || UsedInInstr.test(PhysReg)) - continue; - - // Calculate the cost of bringing PhysReg into the working set. - unsigned Cost=0; - bool Impossible = false; - for (const unsigned *AS = TRI->getAliasSet(PhysReg); - unsigned Alias = *AS; ++AS) { - if (UsedInInstr.test(Alias)) { - Impossible = true; - break; - } - switch (PhysRegState[Alias]) { - case regDisabled: - break; - case regReserved: - Impossible = true; - break; - case regFree: - Cost++; - break; - default: - Cost += spillCost; - break; - } - } - if (Impossible) continue; - DEBUG(dbgs() << " - candidate " << TRI->getName(PhysReg) - << " cost=" << Cost << "\n"); - if (!BestReg || Cost < BestCost) { - BestReg = PhysReg; - BestCost = Cost; - if (Cost < spillCost) break; - } - } + DEBUG(dbgs() << "Allocating " << PrintReg(VirtReg) << " from " + << RC->getName() << "\n"); + + unsigned BestReg = 0, BestCost = spillImpossible; + for (ArrayRef::iterator I = AO.begin(), E = AO.end(); I != E; ++I) { + unsigned Cost = calcSpillCost(*I); + DEBUG(dbgs() << "\tRegister: " << *I << "\n"); + DEBUG(dbgs() << "\tCost: " << Cost << "\n"); + DEBUG(dbgs() << "\tBestCost: " << BestCost << "\n"); + // Cost is 0 when all aliases are already disabled. + if (Cost == 0) + return assignVirtToPhysReg(LRE, *I); + if (Cost < BestCost) + BestReg = *I, BestCost = Cost; } if (BestReg) { - // BestCost is 0 when all aliases are already disabled. 
- if (BestCost) { - if (PhysRegState[BestReg] != regDisabled) - spillVirtReg(MBB, MI, PhysRegState[BestReg], true); - else { - // Make sure all aliases are disabled. - for (const unsigned *AS = TRI->getAliasSet(BestReg); - unsigned Alias = *AS; ++AS) { - switch (PhysRegState[Alias]) { - case regDisabled: - continue; - case regFree: - PhysRegState[Alias] = regDisabled; - break; - default: - spillVirtReg(MBB, MI, PhysRegState[Alias], true); - PhysRegState[Alias] = regDisabled; - break; - } - } - } - } - return assignVirtToPhysReg(VirtReg, BestReg); + definePhysReg(MI, BestReg, regFree); + return assignVirtToPhysReg(LRE, BestReg); } // Nothing we can do. @@ -480,121 +538,245 @@ RAFast::LiveRegMap::iterator RAFast::allocVirtReg(MachineBasicBlock &MBB, MI->print(Msg, TM); } report_fatal_error(Msg.str()); - return LiveVirtRegs.end(); } /// defineVirtReg - Allocate a register for VirtReg and mark it as dirty. -unsigned RAFast::defineVirtReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned OpNum, unsigned VirtReg, unsigned Hint) { +RAFast::LiveRegMap::iterator +RAFast::defineVirtReg(MachineInstr *MI, unsigned OpNum, + unsigned VirtReg, unsigned Hint) { assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Not a virtual register"); - LiveRegMap::iterator lri = LiveVirtRegs.find(VirtReg); - if (lri == LiveVirtRegs.end()) - lri = allocVirtReg(MBB, MI, VirtReg, Hint); - else - addKillFlag(lri); // Kill before redefine. - LiveReg &LR = lri->second; + LiveRegMap::iterator LRI; + bool New; + tie(LRI, New) = LiveVirtRegs.insert(std::make_pair(VirtReg, LiveReg())); + LiveReg &LR = LRI->second; + if (New) { + // If there is no hint, peek at the only use of this register. + if ((!Hint || !TargetRegisterInfo::isPhysicalRegister(Hint)) && + MRI->hasOneNonDBGUse(VirtReg)) { + const MachineInstr &UseMI = *MRI->use_nodbg_begin(VirtReg); + // It's a copy, use the destination register as a hint. + if (UseMI.isCopyLike()) + Hint = UseMI.getOperand(0).getReg(); + } + allocVirtReg(MI, *LRI, Hint); + } else if (LR.LastUse) { + // Redefining a live register - kill at the last use, unless it is this + // instruction defining VirtReg multiple times. + if (LR.LastUse != MI || LR.LastUse->getOperand(LR.LastOpNum).isUse()) + addKillFlag(LR); + } + assert(LR.PhysReg && "Register not assigned"); LR.LastUse = MI; LR.LastOpNum = OpNum; LR.Dirty = true; UsedInInstr.set(LR.PhysReg); - return LR.PhysReg; + return LRI; } /// reloadVirtReg - Make sure VirtReg is available in a physreg and return it. 
-unsigned RAFast::reloadVirtReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned OpNum, unsigned VirtReg, unsigned Hint) { +RAFast::LiveRegMap::iterator +RAFast::reloadVirtReg(MachineInstr *MI, unsigned OpNum, + unsigned VirtReg, unsigned Hint) { assert(TargetRegisterInfo::isVirtualRegister(VirtReg) && "Not a virtual register"); - LiveRegMap::iterator lri = LiveVirtRegs.find(VirtReg); - if (lri == LiveVirtRegs.end()) { - lri = allocVirtReg(MBB, MI, VirtReg, Hint); + LiveRegMap::iterator LRI; + bool New; + tie(LRI, New) = LiveVirtRegs.insert(std::make_pair(VirtReg, LiveReg())); + LiveReg &LR = LRI->second; + MachineOperand &MO = MI->getOperand(OpNum); + if (New) { + allocVirtReg(MI, *LRI, Hint); const TargetRegisterClass *RC = MRI->getRegClass(VirtReg); int FrameIndex = getStackSpaceFor(VirtReg, RC); - DEBUG(dbgs() << " Reloading %reg" << VirtReg << " into " - << TRI->getName(lri->second.PhysReg) << "\n"); - TII->loadRegFromStackSlot(MBB, MI, lri->second.PhysReg, FrameIndex, RC, - TRI); + DEBUG(dbgs() << "Reloading " << PrintReg(VirtReg, TRI) << " into " + << PrintReg(LR.PhysReg, TRI) << "\n"); + TII->loadRegFromStackSlot(*MBB, MI, LR.PhysReg, FrameIndex, RC, TRI); ++NumLoads; + } else if (LR.Dirty) { + if (isLastUseOfLocalReg(MO)) { + DEBUG(dbgs() << "Killing last use: " << MO << "\n"); + if (MO.isUse()) + MO.setIsKill(); + else + MO.setIsDead(); + } else if (MO.isKill()) { + DEBUG(dbgs() << "Clearing dubious kill: " << MO << "\n"); + MO.setIsKill(false); + } else if (MO.isDead()) { + DEBUG(dbgs() << "Clearing dubious dead: " << MO << "\n"); + MO.setIsDead(false); + } + } else if (MO.isKill()) { + // We must remove kill flags from uses of reloaded registers because the + // register would be killed immediately, and there might be a second use: + // %foo = OR %x, %x + // This would cause a second reload of %x into a different register. + DEBUG(dbgs() << "Clearing clean kill: " << MO << "\n"); + MO.setIsKill(false); + } else if (MO.isDead()) { + DEBUG(dbgs() << "Clearing clean dead: " << MO << "\n"); + MO.setIsDead(false); } - LiveReg &LR = lri->second; + assert(LR.PhysReg && "Register not assigned"); LR.LastUse = MI; LR.LastOpNum = OpNum; UsedInInstr.set(LR.PhysReg); - return LR.PhysReg; + return LRI; } -/// reservePhysReg - Mark PhysReg as reserved. This is very similar to -/// defineVirtReg except the physreg is reserved instead of allocated. -void RAFast::reservePhysReg(MachineBasicBlock &MBB, MachineInstr *MI, - unsigned PhysReg) { - UsedInInstr.set(PhysReg); - switch (unsigned VirtReg = PhysRegState[PhysReg]) { - case regDisabled: - break; - case regFree: - PhysRegState[PhysReg] = regReserved; - return; - case regReserved: - return; - default: - spillVirtReg(MBB, MI, VirtReg, true); - PhysRegState[PhysReg] = regReserved; - return; +// setPhysReg - Change operand OpNum in MI the refer the PhysReg, considering +// subregs. This may invalidate any operand pointers. +// Return true if the operand kills its register. +bool RAFast::setPhysReg(MachineInstr *MI, unsigned OpNum, unsigned PhysReg) { + MachineOperand &MO = MI->getOperand(OpNum); + if (!MO.getSubReg()) { + MO.setReg(PhysReg); + return MO.isKill() || MO.isDead(); } - // This is a disabled register, disable all aliases. - for (const unsigned *AS = TRI->getAliasSet(PhysReg); - unsigned Alias = *AS; ++AS) { - UsedInInstr.set(Alias); - switch (unsigned VirtReg = PhysRegState[Alias]) { - case regDisabled: - case regFree: - break; - case regReserved: - // is a super register already reserved? 
- if (TRI->isSuperRegister(PhysReg, Alias)) - return; - break; - default: - spillVirtReg(MBB, MI, VirtReg, true); - break; - } - PhysRegState[Alias] = regDisabled; + // Handle subregister index. + MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, MO.getSubReg()) : 0); + MO.setSubReg(0); + + // A kill flag implies killing the full register. Add corresponding super + // register kill. + if (MO.isKill()) { + MI->addRegisterKilled(PhysReg, TRI, true); + return true; } - PhysRegState[PhysReg] = regReserved; + return MO.isDead(); } -// setPhysReg - Change MO the refer the PhysReg, considering subregs. -void RAFast::setPhysReg(MachineOperand &MO, unsigned PhysReg) { - if (unsigned Idx = MO.getSubReg()) { - MO.setReg(PhysReg ? TRI->getSubReg(PhysReg, Idx) : 0); - MO.setSubReg(0); - } else - MO.setReg(PhysReg); +// Handle special instruction operand like early clobbers and tied ops when +// there are additional physreg defines. +void RAFast::handleThroughOperands(MachineInstr *MI, + SmallVectorImpl &VirtDead) { + DEBUG(dbgs() << "Scanning for through registers:"); + SmallSet ThroughRegs; + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg()) continue; + unsigned Reg = MO.getReg(); + if (!TargetRegisterInfo::isVirtualRegister(Reg)) + continue; + if (MO.isEarlyClobber() || MI->isRegTiedToDefOperand(i) || + (MO.getSubReg() && MI->readsVirtualRegister(Reg))) { + if (ThroughRegs.insert(Reg)) + DEBUG(dbgs() << ' ' << PrintReg(Reg)); + } + } + + // If any physreg defines collide with preallocated through registers, + // we must spill and reallocate. + DEBUG(dbgs() << "\nChecking for physdef collisions.\n"); + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg() || !MO.isDef()) continue; + unsigned Reg = MO.getReg(); + if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue; + UsedInInstr.set(Reg); + if (ThroughRegs.count(PhysRegState[Reg])) + definePhysReg(MI, Reg, regFree); + for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) { + UsedInInstr.set(*AS); + if (ThroughRegs.count(PhysRegState[*AS])) + definePhysReg(MI, *AS, regFree); + } + } + + SmallVector PartialDefs; + DEBUG(dbgs() << "Allocating tied uses and early clobbers.\n"); + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg()) continue; + unsigned Reg = MO.getReg(); + if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; + if (MO.isUse()) { + unsigned DefIdx = 0; + if (!MI->isRegTiedToDefOperand(i, &DefIdx)) continue; + DEBUG(dbgs() << "Operand " << i << "("<< MO << ") is tied to operand " + << DefIdx << ".\n"); + LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0); + unsigned PhysReg = LRI->second.PhysReg; + setPhysReg(MI, i, PhysReg); + // Note: we don't update the def operand yet. That would cause the normal + // def-scan to attempt spilling. + } else if (MO.getSubReg() && MI->readsVirtualRegister(Reg)) { + DEBUG(dbgs() << "Partial redefine: " << MO << "\n"); + // Reload the register, but don't assign to the operand just yet. + // That would confuse the later phys-def processing pass. + LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, 0); + PartialDefs.push_back(LRI->second.PhysReg); + } else if (MO.isEarlyClobber()) { + // Note: defineVirtReg may invalidate MO. 
+ LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, 0); + unsigned PhysReg = LRI->second.PhysReg; + if (setPhysReg(MI, i, PhysReg)) + VirtDead.push_back(Reg); + } + } + + // Restore UsedInInstr to a state usable for allocating normal virtual uses. + UsedInInstr.reset(); + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg() || (MO.isDef() && !MO.isEarlyClobber())) continue; + unsigned Reg = MO.getReg(); + if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue; + DEBUG(dbgs() << "\tSetting reg " << Reg << " as used in instr\n"); + UsedInInstr.set(Reg); + } + + // Also mark PartialDefs as used to avoid reallocation. + for (unsigned i = 0, e = PartialDefs.size(); i != e; ++i) + UsedInInstr.set(PartialDefs[i]); } -void RAFast::AllocateBasicBlock(MachineBasicBlock &MBB) { - DEBUG(dbgs() << "\nBB#" << MBB.getNumber() << ", "<< MBB.getName() << "\n"); +void RAFast::AllocateBasicBlock() { + DEBUG(dbgs() << "\nAllocating " << *MBB); + + // FIXME: This should probably be added by instruction selection instead? + // If the last instruction in the block is a return, make sure to mark it as + // using all of the live-out values in the function. Things marked both call + // and return are tail calls; do not do this for them. The tail callee need + // not take the same registers as input that it produces as output, and there + // are dependencies for its input registers elsewhere. + if (!MBB->empty() && MBB->back().getDesc().isReturn() && + !MBB->back().getDesc().isCall()) { + MachineInstr *Ret = &MBB->back(); + + for (MachineRegisterInfo::liveout_iterator + I = MF->getRegInfo().liveout_begin(), + E = MF->getRegInfo().liveout_end(); I != E; ++I) { + assert(TargetRegisterInfo::isPhysicalRegister(*I) && + "Cannot have a live-out virtual register."); + + // Add live-out registers as implicit uses. + Ret->addRegisterKilled(*I, TRI, true); + } + } PhysRegState.assign(TRI->getNumRegs(), regDisabled); assert(LiveVirtRegs.empty() && "Mapping not cleared form last block?"); - MachineBasicBlock::iterator MII = MBB.begin(); + MachineBasicBlock::iterator MII = MBB->begin(); // Add live-in registers as live. - for (MachineBasicBlock::livein_iterator I = MBB.livein_begin(), - E = MBB.livein_end(); I != E; ++I) - reservePhysReg(MBB, MII, *I); + for (MachineBasicBlock::livein_iterator I = MBB->livein_begin(), + E = MBB->livein_end(); I != E; ++I) + if (RegClassInfo.isAllocatable(*I)) + definePhysReg(MII, *I, regReserved); - SmallVector VirtKills, PhysKills, PhysDefs; + SmallVector VirtDead; + SmallVector Coalesced; // Otherwise, sequentially allocate each instruction in the MBB. 
- while (MII != MBB.end()) { + while (MII != MBB->end()) { MachineInstr *MI = MII++; const TargetInstrDesc &TID = MI->getDesc(); DEBUG({ - dbgs() << "\nStarting RegAlloc of: " << *MI << "Working set:"; + dbgs() << "\n>> " << *MI << "Regs:"; for (unsigned Reg = 1, E = TRI->getNumRegs(); Reg != E; ++Reg) { if (PhysRegState[Reg] == regDisabled) continue; dbgs() << " " << TRI->getName(Reg); @@ -602,10 +784,10 @@ void RAFast::AllocateBasicBlock(MachineBasicBlock &MBB) { case regFree: break; case regReserved: - dbgs() << "(resv)"; + dbgs() << "*"; break; default: - dbgs() << "=%reg" << PhysRegState[Reg]; + dbgs() << '=' << PrintReg(PhysRegState[Reg]); if (LiveVirtRegs[PhysRegState[Reg]].Dirty) dbgs() << "*"; assert(LiveVirtRegs[PhysRegState[Reg]].PhysReg == Reg && @@ -628,188 +810,264 @@ void RAFast::AllocateBasicBlock(MachineBasicBlock &MBB) { // Debug values are not allowed to change codegen in any way. if (MI->isDebugValue()) { - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { - MachineOperand &MO = MI->getOperand(i); - if (!MO.isReg()) continue; - unsigned Reg = MO.getReg(); - if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue; - LiveRegMap::iterator lri = LiveVirtRegs.find(Reg); - if (lri != LiveVirtRegs.end()) - setPhysReg(MO, lri->second.PhysReg); - else - MO.setReg(0); // We can't allocate a physreg for a DebugValue, sorry! + bool ScanDbgValue = true; + while (ScanDbgValue) { + ScanDbgValue = false; + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg()) continue; + unsigned Reg = MO.getReg(); + if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; + LiveDbgValueMap[Reg] = MI; + LiveRegMap::iterator LRI = LiveVirtRegs.find(Reg); + if (LRI != LiveVirtRegs.end()) + setPhysReg(MI, i, LRI->second.PhysReg); + else { + int SS = StackSlotForVirtReg[Reg]; + if (SS == -1) { + // We can't allocate a physreg for a DebugValue, sorry! + DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE"); + MO.setReg(0); + } + else { + // Modify DBG_VALUE now that the value is in a spill slot. + int64_t Offset = MI->getOperand(1).getImm(); + const MDNode *MDPtr = + MI->getOperand(MI->getNumOperands()-1).getMetadata(); + DebugLoc DL = MI->getDebugLoc(); + if (MachineInstr *NewDV = + TII->emitFrameIndexDebugValue(*MF, SS, Offset, MDPtr, DL)) { + DEBUG(dbgs() << "Modifying debug info due to spill:" << + "\t" << *MI); + MachineBasicBlock *MBB = MI->getParent(); + MBB->insert(MBB->erase(MI), NewDV); + // Scan NewDV operands from the beginning. + MI = NewDV; + ScanDbgValue = true; + break; + } else { + // We can't allocate a physreg for a DebugValue; sorry! + DEBUG(dbgs() << "Unable to allocate vreg used by DBG_VALUE"); + MO.setReg(0); + } + } + } + } } // Next instruction. continue; } // If this is a copy, we may be able to coalesce. - unsigned CopySrc, CopyDst, CopySrcSub, CopyDstSub; - if (!TII->isMoveInstr(*MI, CopySrc, CopyDst, CopySrcSub, CopyDstSub)) - CopySrc = CopyDst = 0; + unsigned CopySrc = 0, CopyDst = 0, CopySrcSub = 0, CopyDstSub = 0; + if (MI->isCopy()) { + CopyDst = MI->getOperand(0).getReg(); + CopySrc = MI->getOperand(1).getReg(); + CopyDstSub = MI->getOperand(0).getSubReg(); + CopySrcSub = MI->getOperand(1).getSubReg(); + } // Track registers used by instruction. UsedInInstr.reset(); - PhysDefs.clear(); // First scan. // Mark physreg uses and early clobbers as used. - // Collect PhysKills. 
+ // Find the end of the virtreg operands + unsigned VirtOpEnd = 0; + bool hasTiedOps = false; + bool hasEarlyClobbers = false; + bool hasPartialRedefs = false; + bool hasPhysDefs = false; for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { MachineOperand &MO = MI->getOperand(i); if (!MO.isReg()) continue; - - // FIXME: For now, don't trust kill flags - if (MO.isUse()) MO.setIsKill(false); - unsigned Reg = MO.getReg(); - if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg) || - ReservedRegs.test(Reg)) continue; + if (!Reg) continue; + if (TargetRegisterInfo::isVirtualRegister(Reg)) { + VirtOpEnd = i+1; + if (MO.isUse()) { + hasTiedOps = hasTiedOps || + TID.getOperandConstraint(i, TOI::TIED_TO) != -1; + } else { + if (MO.isEarlyClobber()) + hasEarlyClobbers = true; + if (MO.getSubReg() && MI->readsVirtualRegister(Reg)) + hasPartialRedefs = true; + } + continue; + } + if (!RegClassInfo.isAllocatable(Reg)) continue; if (MO.isUse()) { -#ifndef NDEBUG - // We are using a physreg directly. It had better not be clobbered by a - // virtreg. - assert(PhysRegState[Reg] <= regReserved && "Using clobbered physreg"); - if (PhysRegState[Reg] == regDisabled) - for (const unsigned *AS = TRI->getAliasSet(Reg); - unsigned Alias = *AS; ++AS) - assert(PhysRegState[Alias] <= regReserved && - "Physreg alias was clobbered"); -#endif - PhysKills.push_back(Reg); // Any clean physreg use is a kill. - UsedInInstr.set(Reg); + usePhysReg(MO); } else if (MO.isEarlyClobber()) { - spillPhysReg(MBB, MI, Reg, true); - UsedInInstr.set(Reg); - PhysDefs.push_back(Reg); - } + definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ? + regFree : regReserved); + hasEarlyClobbers = true; + } else + hasPhysDefs = true; + } + + // The instruction may have virtual register operands that must be allocated + // the same register at use-time and def-time: early clobbers and tied + // operands. If there are also physical defs, these registers must avoid + // both physical defs and uses, making them more constrained than normal + // operands. + // Similarly, if there are multiple defs and tied operands, we must make + // sure the same register is allocated to uses and defs. + // We didn't detect inline asm tied operands above, so just make this extra + // pass for all inline asm. + if (MI->isInlineAsm() || hasEarlyClobbers || hasPartialRedefs || + (hasTiedOps && (hasPhysDefs || TID.getNumDefs() > 1))) { + handleThroughOperands(MI, VirtDead); + // Don't attempt coalescing when we have funny stuff going on. + CopyDst = 0; + // Pretend we have early clobbers so the use operands get marked below. + // This is not necessary for the common case of a single tied use. + hasEarlyClobbers = true; } // Second scan. - // Allocate virtreg uses and early clobbers. - // Collect VirtKills - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + // Allocate virtreg uses. 
+ for (unsigned i = 0; i != VirtOpEnd; ++i) { MachineOperand &MO = MI->getOperand(i); if (!MO.isReg()) continue; unsigned Reg = MO.getReg(); - if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg)) continue; + if (!TargetRegisterInfo::isVirtualRegister(Reg)) continue; if (MO.isUse()) { - unsigned PhysReg = reloadVirtReg(MBB, MI, i, Reg, CopyDst); - if (CopySrc == Reg) - CopySrc = PhysReg; - setPhysReg(MO, PhysReg); - if (MO.isKill()) - VirtKills.push_back(Reg); - } else if (MO.isEarlyClobber()) { - unsigned PhysReg = defineVirtReg(MBB, MI, i, Reg, 0); - setPhysReg(MO, PhysReg); - PhysDefs.push_back(PhysReg); + LiveRegMap::iterator LRI = reloadVirtReg(MI, i, Reg, CopyDst); + unsigned PhysReg = LRI->second.PhysReg; + CopySrc = (CopySrc == Reg || CopySrc == PhysReg) ? PhysReg : 0; + if (setPhysReg(MI, i, PhysReg)) + killVirtReg(LRI); } } - // Process virtreg kills - for (unsigned i = 0, e = VirtKills.size(); i != e; ++i) - killVirtReg(VirtKills[i]); - VirtKills.clear(); - - // Process physreg kills - for (unsigned i = 0, e = PhysKills.size(); i != e; ++i) - killPhysReg(PhysKills[i]); - PhysKills.clear(); - MRI->addPhysRegsUsed(UsedInInstr); - // Track registers defined by instruction - early clobbers at this point. + // Track registers defined by instruction - early clobbers and tied uses at + // this point. UsedInInstr.reset(); - for (unsigned i = 0, e = PhysDefs.size(); i != e; ++i) { - unsigned PhysReg = PhysDefs[i]; - UsedInInstr.set(PhysReg); - for (const unsigned *AS = TRI->getAliasSet(PhysReg); - unsigned Alias = *AS; ++AS) - UsedInInstr.set(Alias); + if (hasEarlyClobbers) { + for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + MachineOperand &MO = MI->getOperand(i); + if (!MO.isReg()) continue; + unsigned Reg = MO.getReg(); + if (!Reg || !TargetRegisterInfo::isPhysicalRegister(Reg)) continue; + // Look for physreg defs and tied uses. + if (!MO.isDef() && !MI->isRegTiedToDefOperand(i)) continue; + UsedInInstr.set(Reg); + for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) + UsedInInstr.set(*AS); + } + } + + unsigned DefOpEnd = MI->getNumOperands(); + if (TID.isCall()) { + // Spill all virtregs before a call. This serves two purposes: 1. If an + // exception is thrown, the landing pad is going to expect to find + // registers in their spill slots, and 2. we don't have to wade through + // all the operands on the call instruction. + DefOpEnd = VirtOpEnd; + DEBUG(dbgs() << " Spilling remaining registers before call.\n"); + spillAll(MI); + + // The imp-defs are skipped below, but we still need to mark those + // registers as used by the function. + SkippedInstrs.insert(&TID); } // Third scan. // Allocate defs and collect dead defs. - for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) { + for (unsigned i = 0; i != DefOpEnd; ++i) { MachineOperand &MO = MI->getOperand(i); - if (!MO.isReg() || !MO.isDef() || !MO.getReg()) continue; + if (!MO.isReg() || !MO.isDef() || !MO.getReg() || MO.isEarlyClobber()) + continue; unsigned Reg = MO.getReg(); if (TargetRegisterInfo::isPhysicalRegister(Reg)) { - if (ReservedRegs.test(Reg)) continue; - if (MO.isImplicit()) - spillPhysReg(MBB, MI, Reg, true); - else - reservePhysReg(MBB, MI, Reg); - if (MO.isDead()) - PhysKills.push_back(Reg); + if (!RegClassInfo.isAllocatable(Reg)) continue; + definePhysReg(MI, Reg, (MO.isImplicit() || MO.isDead()) ? 
+ regFree : regReserved); continue; } - if (MO.isDead()) - VirtKills.push_back(Reg); - unsigned PhysReg = defineVirtReg(MBB, MI, i, Reg, CopySrc); - if (CopyDst == Reg) - CopyDst = PhysReg; - setPhysReg(MO, PhysReg); - } - - // Spill all dirty virtregs before a call, in case of an exception. - if (TID.isCall()) { - DEBUG(dbgs() << " Spilling remaining registers before call.\n"); - spillAll(MBB, MI); + LiveRegMap::iterator LRI = defineVirtReg(MI, i, Reg, CopySrc); + unsigned PhysReg = LRI->second.PhysReg; + if (setPhysReg(MI, i, PhysReg)) { + VirtDead.push_back(Reg); + CopyDst = 0; // cancel coalescing; + } else + CopyDst = (CopyDst == Reg || CopyDst == PhysReg) ? PhysReg : 0; } - // Process virtreg deads. - for (unsigned i = 0, e = VirtKills.size(); i != e; ++i) - killVirtReg(VirtKills[i]); - VirtKills.clear(); - - // Process physreg deads. - for (unsigned i = 0, e = PhysKills.size(); i != e; ++i) - killPhysReg(PhysKills[i]); - PhysKills.clear(); + // Kill dead defs after the scan to ensure that multiple defs of the same + // register are allocated identically. We didn't need to do this for uses + // because we are crerating our own kill flags, and they are always at the + // last use. + for (unsigned i = 0, e = VirtDead.size(); i != e; ++i) + killVirtReg(VirtDead[i]); + VirtDead.clear(); MRI->addPhysRegsUsed(UsedInInstr); + + if (CopyDst && CopyDst == CopySrc && CopyDstSub == CopySrcSub) { + DEBUG(dbgs() << "-- coalescing: " << *MI); + Coalesced.push_back(MI); + } else { + DEBUG(dbgs() << "<< " << *MI); + } } // Spill all physical registers holding virtual registers now. - DEBUG(dbgs() << "Killing live registers at end of block.\n"); - MachineBasicBlock::iterator MI = MBB.getFirstTerminator(); - while (!LiveVirtRegs.empty()) - spillVirtReg(MBB, MI, LiveVirtRegs.begin()->first, true); + DEBUG(dbgs() << "Spilling live registers at end of block.\n"); + spillAll(MBB->getFirstTerminator()); + + // Erase all the coalesced copies. We are delaying it until now because + // LiveVirtRegs might refer to the instrs. + for (unsigned i = 0, e = Coalesced.size(); i != e; ++i) + MBB->erase(Coalesced[i]); + NumCopies += Coalesced.size(); - DEBUG(MBB.dump()); + DEBUG(MBB->dump()); } /// runOnMachineFunction - Register allocate the whole function /// bool RAFast::runOnMachineFunction(MachineFunction &Fn) { - DEBUG(dbgs() << "Machine Function\n"); - DEBUG(Fn.dump()); + DEBUG(dbgs() << "********** FAST REGISTER ALLOCATION **********\n" + << "********** Function: " + << ((Value*)Fn.getFunction())->getName() << '\n'); MF = &Fn; MRI = &MF->getRegInfo(); TM = &Fn.getTarget(); TRI = TM->getRegisterInfo(); TII = TM->getInstrInfo(); - + RegClassInfo.runOnMachineFunction(Fn); UsedInInstr.resize(TRI->getNumRegs()); - ReservedRegs = TRI->getReservedRegs(*MF); // initialize the virtual->physical register map to have a 'null' // mapping for all virtual registers - unsigned LastVirtReg = MRI->getLastVirtReg(); - StackSlotForVirtReg.grow(LastVirtReg); + StackSlotForVirtReg.resize(MRI->getNumVirtRegs()); // Loop over all of the basic blocks, eliminating virtual register references - for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end(); - MBB != MBBe; ++MBB) - AllocateBasicBlock(*MBB); + for (MachineFunction::iterator MBBi = Fn.begin(), MBBe = Fn.end(); + MBBi != MBBe; ++MBBi) { + MBB = &*MBBi; + AllocateBasicBlock(); + } // Make sure the set of used physregs is closed under subreg operations. MRI->closePhysRegsUsed(*TRI); + // Add the clobber lists for all the instructions we skipped earlier. 
+  for (SmallPtrSet<const TargetInstrDesc*, 4>::const_iterator
+    I = SkippedInstrs.begin(), E = SkippedInstrs.end(); I != E; ++I)
+    if (const unsigned *Defs = (*I)->getImplicitDefs())
+      while (*Defs)
+        MRI->setPhysRegUsed(*Defs++);
+  SkippedInstrs.clear();
   StackSlotForVirtReg.clear();
+  LiveDbgValueMap.clear();
   return true;
 }
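
The cost constants introduced above (spillClean = 1, spillDirty = 100, spillImpossible = ~0u) drive calcSpillCost() and allocVirtReg(). As a minimal standalone C++ sketch, not part of this patch and deliberately ignoring PhysRegState aliases and the per-operand UsedInInstr bookkeeping, the heuristic reduces to: a free register is taken for nothing, a reserved or already-used register cannot be taken, and evicting a live virtual register is far more expensive when its value is dirty and must first be stored to its stack slot. The function and parameter names below (modelSpillCost and friends) are illustrative only, not LLVM API.

#include <cstdio>

// Simplified register states, mirroring the RegState enum in the patch.
enum RegState { regDisabled, regFree, regReserved };
enum { spillClean = 1, spillDirty = 100, spillImpossible = ~0u };

// Sketch of the spill-cost heuristic: not LLVM code, just the core idea.
static unsigned modelSpillCost(bool UsedInCurrentInstr, RegState State,
                               bool HoldsVirtReg, bool ValueIsDirty) {
  if (UsedInCurrentInstr)           // already used by this instruction
    return spillImpossible;
  if (HoldsVirtReg)                 // must evict a live virtual register
    return ValueIsDirty ? spillDirty : spillClean;
  if (State == regReserved)         // explicitly reserved, cannot be spilled
    return spillImpossible;
  return 0;                         // free: can be allocated directly
}

int main() {
  std::printf("free register:      %u\n",
              modelSpillCost(false, regFree, false, false));
  std::printf("clean live virtreg: %u\n",
              modelSpillCost(false, regDisabled, true, false));
  std::printf("dirty live virtreg: %u\n",
              modelSpillCost(false, regDisabled, true, true));
  std::printf("reserved register:  %u\n",
              modelSpillCost(false, regReserved, false, false));
  return 0;
}

The 1-versus-100 weighting makes allocVirtReg() strongly prefer evicting clean values, which only cost a reload later, over dirty ones, which cost a store immediately, while a completely free register always wins.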