diff --git a/lib/CodeGen/RegisterCoalescer.cpp b/lib/CodeGen/RegisterCoalescer.cpp
index ba6b4569a8f..0fb59d67682 100644
--- a/lib/CodeGen/RegisterCoalescer.cpp
+++ b/lib/CodeGen/RegisterCoalescer.cpp
@@ -13,42 +13,36 @@
 //
 //===----------------------------------------------------------------------===//
 
-#define DEBUG_TYPE "regalloc"
 #include "RegisterCoalescer.h"
-#include "LiveDebugVariables.h"
-#include "VirtRegMap.h"
-
-#include "llvm/Pass.h"
-#include "llvm/Value.h"
-#include "llvm/ADT/OwningPtr.h"
 #include "llvm/ADT/STLExtras.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/Statistic.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
-#include "llvm/CodeGen/LiveIntervalAnalysis.h"
 #include "llvm/CodeGen/LiveRangeEdit.h"
 #include "llvm/CodeGen/MachineFrameInfo.h"
 #include "llvm/CodeGen/MachineInstr.h"
-#include "llvm/CodeGen/MachineInstr.h"
 #include "llvm/CodeGen/MachineLoopInfo.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
-#include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/Passes.h"
 #include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/VirtRegMap.h"
+#include "llvm/IR/Value.h"
+#include "llvm/Pass.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetInstrInfo.h"
 #include "llvm/Target/TargetMachine.h"
-#include "llvm/Target/TargetOptions.h"
 #include "llvm/Target/TargetRegisterInfo.h"
+#include "llvm/Target/TargetSubtargetInfo.h"
 #include <algorithm>
 #include <cmath>
 using namespace llvm;
 
+#define DEBUG_TYPE "regalloc"
+
 STATISTIC(numJoins    , "Number of interval joins performed");
 STATISTIC(numCrossRCs , "Number of cross class joins performed");
 STATISTIC(numCommutes , "Number of instruction commuting performed");
@@ -63,6 +57,17 @@ EnableJoining("join-liveintervals",
               cl::desc("Coalesce copies (default=true)"),
               cl::init(true));
 
+// Temporary flag to test critical edge unsplitting.
+static cl::opt<bool>
+EnableJoinSplits("join-splitedges",
+  cl::desc("Coalesce copies on split edges (default=subtarget)"), cl::Hidden);
+
+// Temporary flag to test global copy optimization.
+static cl::opt<cl::boolOrDefault>
+EnableGlobalCopies("join-globalcopies",
+  cl::desc("Coalesce copies that span blocks (default=subtarget)"),
+  cl::init(cl::BOU_UNSET), cl::Hidden);
+
 static cl::opt<bool>
 VerifyCoalescing("verify-coalescing",
   cl::desc("Verify machine instrs before and after register coalescing"),
@@ -77,13 +82,21 @@ namespace {
     const TargetRegisterInfo* TRI;
     const TargetInstrInfo* TII;
     LiveIntervals *LIS;
-    LiveDebugVariables *LDV;
     const MachineLoopInfo* Loops;
     AliasAnalysis *AA;
     RegisterClassInfo RegClassInfo;
 
+    /// \brief True if the coalescer should aggressively coalesce global copies
+    /// in favor of keeping local copies.
+    bool JoinGlobalCopies;
+
+    /// \brief True if the coalescer should aggressively coalesce fall-thru
+    /// blocks exclusively containing copies.
+    bool JoinSplitEdges;
+
     /// WorkList - Copy instructions yet to be coalesced.
     SmallVector<MachineInstr*, 8> WorkList;
+    SmallVector<MachineInstr*, 8> LocalWorkList;
 
     /// ErasedInstrs - Set of instruction pointers that have been erased, and
     /// that may be present in WorkList.
@@ -99,7 +112,10 @@ namespace {
     void eliminateDeadDefs();
 
     /// LiveRangeEdit callback.
-    void LRE_WillEraseInstruction(MachineInstr *MI);
+    void LRE_WillEraseInstruction(MachineInstr *MI) override;
+
+    /// coalesceLocals - coalesce the LocalWorkList.
+    void coalesceLocals();
 
     /// joinAllIntervals - join compatible live intervals
     void joinAllIntervals();
@@ -108,9 +124,9 @@ namespace {
     /// copies that cannot yet be coalesced into WorkList.
     void copyCoalesceInMBB(MachineBasicBlock *MBB);
 
-    /// copyCoalesceWorkList - Try to coalesce all copies in WorkList after
-    /// position From. Return true if any progress was made.
-    bool copyCoalesceWorkList(unsigned From = 0);
+    /// copyCoalesceWorkList - Try to coalesce all copies in CurrList. Return
+    /// true if any progress was made.
+    bool copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList);
 
     /// joinCopy - Attempt to join intervals corresponding to SrcReg/DstReg,
     /// which are the src/dst of the copy instruction CopyMI.  This returns
@@ -150,11 +166,11 @@ namespace {
     /// reMaterializeTrivialDef - If the source of a copy is defined by a
     /// trivial computation, replace the copy by rematerialize the definition.
-    bool reMaterializeTrivialDef(LiveInterval &SrcInt, unsigned DstReg,
-                                 MachineInstr *CopyMI);
+    bool reMaterializeTrivialDef(CoalescerPair &CP, MachineInstr *CopyMI,
+                                 bool &IsDefCopy);
 
     /// canJoinPhys - Return true if a physreg copy should be joined.
-    bool canJoinPhys(CoalescerPair &CP);
+    bool canJoinPhys(const CoalescerPair &CP);
 
     /// updateRegDefsUses - Replace all defs and uses of SrcReg to DstReg and
     /// update the subregister number if it is not zero. If DstReg is a
@@ -172,15 +188,15 @@ namespace {
       initializeRegisterCoalescerPass(*PassRegistry::getPassRegistry());
     }
 
-    virtual void getAnalysisUsage(AnalysisUsage &AU) const;
+    void getAnalysisUsage(AnalysisUsage &AU) const override;
 
-    virtual void releaseMemory();
+    void releaseMemory() override;
 
     /// runOnMachineFunction - pass entry point
-    virtual bool runOnMachineFunction(MachineFunction&);
+    bool runOnMachineFunction(MachineFunction&) override;
 
     /// print - Implement the dump method.
- virtual void print(raw_ostream &O, const Module* = 0) const; + void print(raw_ostream &O, const Module* = nullptr) const override; }; } /// end anonymous namespace @@ -189,7 +205,6 @@ char &llvm::RegisterCoalescerID = RegisterCoalescer::ID; INITIALIZE_PASS_BEGIN(RegisterCoalescer, "simple-register-coalescing", "Simple Register Coalescing", false, false) INITIALIZE_PASS_DEPENDENCY(LiveIntervals) -INITIALIZE_PASS_DEPENDENCY(LiveDebugVariables) INITIALIZE_PASS_DEPENDENCY(SlotIndexes) INITIALIZE_PASS_DEPENDENCY(MachineLoopInfo) INITIALIZE_AG_DEPENDENCY(AliasAnalysis) @@ -198,12 +213,6 @@ INITIALIZE_PASS_END(RegisterCoalescer, "simple-register-coalescing", char RegisterCoalescer::ID = 0; -static unsigned compose(const TargetRegisterInfo &tri, unsigned a, unsigned b) { - if (!a) return b; - if (!b) return a; - return tri.composeSubRegIndices(a, b); -} - static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, unsigned &Src, unsigned &Dst, unsigned &SrcSub, unsigned &DstSub) { @@ -214,8 +223,8 @@ static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, SrcSub = MI->getOperand(1).getSubReg(); } else if (MI->isSubregToReg()) { Dst = MI->getOperand(0).getReg(); - DstSub = compose(tri, MI->getOperand(0).getSubReg(), - MI->getOperand(3).getImm()); + DstSub = tri.composeSubRegIndices(MI->getOperand(0).getSubReg(), + MI->getOperand(3).getImm()); Src = MI->getOperand(2).getReg(); SrcSub = MI->getOperand(2).getSubReg(); } else @@ -223,10 +232,27 @@ static bool isMoveInstr(const TargetRegisterInfo &tri, const MachineInstr *MI, return true; } +// Return true if this block should be vacated by the coalescer to eliminate +// branches. The important cases to handle in the coalescer are critical edges +// split during phi elimination which contain only copies. Simple blocks that +// contain non-branches should also be vacated, but this can be handled by an +// earlier pass similar to early if-conversion. +static bool isSplitEdge(const MachineBasicBlock *MBB) { + if (MBB->pred_size() != 1 || MBB->succ_size() != 1) + return false; + + for (MachineBasicBlock::const_iterator MII = MBB->begin(), E = MBB->end(); + MII != E; ++MII) { + if (!MII->isCopyLike() && !MII->isUnconditionalBranch()) + return false; + } + return true; +} + bool CoalescerPair::setRegisters(const MachineInstr *MI) { SrcReg = DstReg = 0; SrcIdx = DstIdx = 0; - NewRC = 0; + NewRC = nullptr; Flipped = CrossClass = false; unsigned Src, Dst, SrcSub, DstSub; @@ -257,7 +283,6 @@ bool CoalescerPair::setRegisters(const MachineInstr *MI) { if (SrcSub) { Dst = TRI.getMatchingSuperReg(Dst, SrcSub, MRI.getRegClass(Src)); if (!Dst) return false; - SrcSub = 0; } else if (!MRI.getRegClass(Src)->contains(Dst)) { return false; } @@ -354,7 +379,8 @@ bool CoalescerPair::isCoalescable(const MachineInstr *MI) const { if (DstReg != Dst) return false; // Registers match, do the subregisters line up? 
-    return compose(TRI, SrcIdx, SrcSub) == compose(TRI, DstIdx, DstSub);
+    return TRI.composeSubRegIndices(SrcIdx, SrcSub) ==
+           TRI.composeSubRegIndices(DstIdx, DstSub);
   }
 }
 
@@ -363,8 +389,6 @@ void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
   AU.addRequired<AliasAnalysis>();
   AU.addRequired<LiveIntervals>();
   AU.addPreserved<LiveIntervals>();
-  AU.addRequired<LiveDebugVariables>();
-  AU.addPreserved<LiveDebugVariables>();
   AU.addPreserved<SlotIndexes>();
   AU.addRequired<MachineLoopInfo>();
   AU.addPreserved<MachineLoopInfo>();
@@ -373,8 +397,9 @@ void RegisterCoalescer::getAnalysisUsage(AnalysisUsage &AU) const {
 }
 
 void RegisterCoalescer::eliminateDeadDefs() {
-  SmallVector<LiveInterval*, 8> NewRegs;
-  LiveRangeEdit(0, NewRegs, *MF, *LIS, 0, this).eliminateDeadDefs(DeadDefs);
+  SmallVector<unsigned, 8> NewRegs;
+  LiveRangeEdit(nullptr, NewRegs, *MF, *LIS,
+                nullptr, this).eliminateDeadDefs(DeadDefs);
 }
 
 // Callback from eliminateDeadDefs().
@@ -409,11 +434,11 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
     LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg());
   SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot();
 
-  // BValNo is a value number in B that is defined by a copy from A.  'B3' in
+  // BValNo is a value number in B that is defined by a copy from A.  'B1' in
   // the example above.
-  LiveInterval::iterator BLR = IntB.FindLiveRangeContaining(CopyIdx);
-  if (BLR == IntB.end()) return false;
-  VNInfo *BValNo = BLR->valno;
+  LiveInterval::iterator BS = IntB.FindSegmentContaining(CopyIdx);
+  if (BS == IntB.end()) return false;
+  VNInfo *BValNo = BS->valno;
 
   // Get the location that B is defined at.  Two options: either this value has
   // an unknown definition point or it is defined at CopyIdx.  If unknown, we
@@ -422,65 +447,66 @@ bool RegisterCoalescer::adjustCopiesBackFrom(const CoalescerPair &CP,
 
   // AValNo is the value number in A that defines the copy, A3 in the example.
   SlotIndex CopyUseIdx = CopyIdx.getRegSlot(true);
-  LiveInterval::iterator ALR = IntA.FindLiveRangeContaining(CopyUseIdx);
-  // The live range might not exist after fun with physreg coalescing.
-  if (ALR == IntA.end()) return false;
-  VNInfo *AValNo = ALR->valno;
+  LiveInterval::iterator AS = IntA.FindSegmentContaining(CopyUseIdx);
+  // The live segment might not exist after fun with physreg coalescing.
+  if (AS == IntA.end()) return false;
+  VNInfo *AValNo = AS->valno;
 
   // If AValNo is defined as a copy from IntB, we can potentially process this.
   // Get the instruction that defines this value number.
   MachineInstr *ACopyMI = LIS->getInstructionFromIndex(AValNo->def);
-  if (!CP.isCoalescable(ACopyMI))
+  // Don't allow any partial copies, even if isCoalescable() allows them.
+  if (!CP.isCoalescable(ACopyMI) || !ACopyMI->isFullCopy())
     return false;
 
-  // Get the LiveRange in IntB that this value number starts with.
-  LiveInterval::iterator ValLR =
-    IntB.FindLiveRangeContaining(AValNo->def.getPrevSlot());
-  if (ValLR == IntB.end())
+  // Get the Segment in IntB that this value number starts with.
+  LiveInterval::iterator ValS =
+    IntB.FindSegmentContaining(AValNo->def.getPrevSlot());
+  if (ValS == IntB.end())
     return false;
 
-  // Make sure that the end of the live range is inside the same block as
+  // Make sure that the end of the live segment is inside the same block as
   // CopyMI.
- MachineInstr *ValLREndInst = - LIS->getInstructionFromIndex(ValLR->end.getPrevSlot()); - if (!ValLREndInst || ValLREndInst->getParent() != CopyMI->getParent()) + MachineInstr *ValSEndInst = + LIS->getInstructionFromIndex(ValS->end.getPrevSlot()); + if (!ValSEndInst || ValSEndInst->getParent() != CopyMI->getParent()) return false; - // Okay, we now know that ValLR ends in the same block that the CopyMI - // live-range starts. If there are no intervening live ranges between them in - // IntB, we can merge them. - if (ValLR+1 != BLR) return false; + // Okay, we now know that ValS ends in the same block that the CopyMI + // live-range starts. If there are no intervening live segments between them + // in IntB, we can merge them. + if (ValS+1 != BS) return false; DEBUG(dbgs() << "Extending: " << PrintReg(IntB.reg, TRI)); - SlotIndex FillerStart = ValLR->end, FillerEnd = BLR->start; + SlotIndex FillerStart = ValS->end, FillerEnd = BS->start; // We are about to delete CopyMI, so need to remove it as the 'instruction // that defines this value #'. Update the valnum with the new defining // instruction #. BValNo->def = FillerStart; // Okay, we can merge them. We need to insert a new liverange: - // [ValLR.end, BLR.begin) of either value number, then we merge the + // [ValS.end, BS.begin) of either value number, then we merge the // two value numbers. - IntB.addRange(LiveRange(FillerStart, FillerEnd, BValNo)); + IntB.addSegment(LiveInterval::Segment(FillerStart, FillerEnd, BValNo)); // Okay, merge "B1" into the same value number as "B0". - if (BValNo != ValLR->valno) - IntB.MergeValueNumberInto(BValNo, ValLR->valno); + if (BValNo != ValS->valno) + IntB.MergeValueNumberInto(BValNo, ValS->valno); DEBUG(dbgs() << " result = " << IntB << '\n'); // If the source instruction was killing the source register before the // merge, unset the isKill marker given the live range has been extended. - int UIdx = ValLREndInst->findRegisterUseOperandIdx(IntB.reg, true); + int UIdx = ValSEndInst->findRegisterUseOperandIdx(IntB.reg, true); if (UIdx != -1) { - ValLREndInst->getOperand(UIdx).setIsKill(false); + ValSEndInst->getOperand(UIdx).setIsKill(false); } // Rewrite the copy. If the copy instruction was killing the destination // register before the merge, find the last use and trim the live range. That // will also add the isKill marker. CopyMI->substituteRegister(IntA.reg, IntB.reg, 0, *TRI); - if (ALR->end == CopyIdx) + if (AS->end == CopyIdx) LIS->shrinkToUses(&IntA); ++numExtends; @@ -501,11 +527,11 @@ bool RegisterCoalescer::hasOtherReachingDefs(LiveInterval &IntA, for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end(); AI != AE; ++AI) { if (AI->valno != AValNo) continue; - LiveInterval::Ranges::iterator BI = - std::upper_bound(IntB.ranges.begin(), IntB.ranges.end(), AI->start); - if (BI != IntB.ranges.begin()) + LiveInterval::iterator BI = + std::upper_bound(IntB.begin(), IntB.end(), AI->start); + if (BI != IntB.begin()) --BI; - for (; BI != IntB.ranges.end() && AI->end >= BI->start; ++BI) { + for (; BI != IntB.end() && AI->end >= BI->start; ++BI) { if (BI->valno == BValNo) continue; if (BI->start <= AI->start && BI->end > AI->start) @@ -551,14 +577,12 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, LiveInterval &IntB = LIS->getInterval(CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg()); - // BValNo is a value number in B that is defined by a copy from A. 'B3' in + // BValNo is a value number in B that is defined by a copy from A. 'B1' in // the example above. 
VNInfo *BValNo = IntB.getVNInfoAt(CopyIdx); if (!BValNo || BValNo->def != CopyIdx) return false; - assert(BValNo->def == CopyIdx && "Copy doesn't define the value?"); - // AValNo is the value number in A that defines the copy, A3 in the example. VNInfo *AValNo = IntA.getVNInfoAt(CopyIdx.getRegSlot(true)); assert(AValNo && "COPY source not live"); @@ -588,7 +612,7 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx); unsigned NewReg = NewDstMO.getReg(); - if (NewReg != IntB.reg || !LiveRangeQuery(IntB, AValNo->def).isKill()) + if (NewReg != IntB.reg || !IntB.Query(AValNo->def).isKill()) return false; // Make sure there are no other definitions of IntB that would reach the @@ -598,16 +622,15 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, // If some of the uses of IntA.reg is already coalesced away, return false. // It's not possible to determine whether it's safe to perform the coalescing. - for (MachineRegisterInfo::use_nodbg_iterator UI = - MRI->use_nodbg_begin(IntA.reg), - UE = MRI->use_nodbg_end(); UI != UE; ++UI) { - MachineInstr *UseMI = &*UI; + for (MachineOperand &MO : MRI->use_nodbg_operands(IntA.reg)) { + MachineInstr *UseMI = MO.getParent(); + unsigned OpNo = &MO - &UseMI->getOperand(0); SlotIndex UseIdx = LIS->getInstructionIndex(UseMI); - LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx); - if (ULR == IntA.end() || ULR->valno != AValNo) + LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx); + if (US == IntA.end() || US->valno != AValNo) continue; // If this use is tied to a def, we can't rewrite the register. - if (UseMI->isRegTiedToDefOperand(UI.getOperandNo())) + if (UseMI->isRegTiedToDefOperand(OpNo)) return false; } @@ -645,8 +668,8 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, // Update uses of IntA of the specific Val# with IntB. for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(IntA.reg), UE = MRI->use_end(); UI != UE;) { - MachineOperand &UseMO = UI.getOperand(); - MachineInstr *UseMI = &*UI; + MachineOperand &UseMO = *UI; + MachineInstr *UseMI = UseMO.getParent(); ++UI; if (UseMI->isDebugValue()) { // FIXME These don't have an instruction index. Not clear we have enough @@ -655,8 +678,8 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, continue; } SlotIndex UseIdx = LIS->getInstructionIndex(UseMI).getRegSlot(true); - LiveInterval::iterator ULR = IntA.FindLiveRangeContaining(UseIdx); - if (ULR == IntA.end() || ULR->valno != AValNo) + LiveInterval::iterator US = IntA.FindSegmentContaining(UseIdx); + if (US == IntA.end() || US->valno != AValNo) continue; // Kill flags are no longer accurate. They are recomputed after RA. UseMO.setIsKill(false); @@ -686,14 +709,14 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, UseMI->eraseFromParent(); } - // Extend BValNo by merging in IntA live ranges of AValNo. Val# definition + // Extend BValNo by merging in IntA live segments of AValNo. Val# definition // is updated. 
VNInfo *ValNo = BValNo; ValNo->def = AValNo->def; for (LiveInterval::iterator AI = IntA.begin(), AE = IntA.end(); AI != AE; ++AI) { if (AI->valno != AValNo) continue; - IntB.addRange(LiveRange(AI->start, AI->end, ValNo)); + IntB.addSegment(LiveInterval::Segment(AI->start, AI->end, ValNo)); } DEBUG(dbgs() << "\t\textended: " << IntB << '\n'); @@ -705,19 +728,30 @@ bool RegisterCoalescer::removeCopyByCommutingDef(const CoalescerPair &CP, /// reMaterializeTrivialDef - If the source of a copy is defined by a trivial /// computation, replace the copy by rematerialize the definition. -bool RegisterCoalescer::reMaterializeTrivialDef(LiveInterval &SrcInt, - unsigned DstReg, - MachineInstr *CopyMI) { - SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI).getRegSlot(true); - LiveInterval::iterator SrcLR = SrcInt.FindLiveRangeContaining(CopyIdx); - assert(SrcLR != SrcInt.end() && "Live range not found!"); - VNInfo *ValNo = SrcLR->valno; +bool RegisterCoalescer::reMaterializeTrivialDef(CoalescerPair &CP, + MachineInstr *CopyMI, + bool &IsDefCopy) { + IsDefCopy = false; + unsigned SrcReg = CP.isFlipped() ? CP.getDstReg() : CP.getSrcReg(); + unsigned SrcIdx = CP.isFlipped() ? CP.getDstIdx() : CP.getSrcIdx(); + unsigned DstReg = CP.isFlipped() ? CP.getSrcReg() : CP.getDstReg(); + unsigned DstIdx = CP.isFlipped() ? CP.getSrcIdx() : CP.getDstIdx(); + if (TargetRegisterInfo::isPhysicalRegister(SrcReg)) + return false; + + LiveInterval &SrcInt = LIS->getInterval(SrcReg); + SlotIndex CopyIdx = LIS->getInstructionIndex(CopyMI); + VNInfo *ValNo = SrcInt.Query(CopyIdx).valueIn(); + assert(ValNo && "CopyMI input register not live"); if (ValNo->isPHIDef() || ValNo->isUnused()) return false; MachineInstr *DefMI = LIS->getInstructionFromIndex(ValNo->def); if (!DefMI) return false; - assert(DefMI && "Defining instruction disappeared"); + if (DefMI->isCopyLike()) { + IsDefCopy = true; + return false; + } if (!DefMI->isAsCheapAsAMove()) return false; if (!TII->isTriviallyReMaterializable(DefMI, AA)) @@ -728,23 +762,51 @@ bool RegisterCoalescer::reMaterializeTrivialDef(LiveInterval &SrcInt, const MCInstrDesc &MCID = DefMI->getDesc(); if (MCID.getNumDefs() != 1) return false; + // Only support subregister destinations when the def is read-undef. + MachineOperand &DstOperand = CopyMI->getOperand(0); + unsigned CopyDstReg = DstOperand.getReg(); + if (DstOperand.getSubReg() && !DstOperand.isUndef()) + return false; + + // If both SrcIdx and DstIdx are set, correct rematerialization would widen + // the register substantially (beyond both source and dest size). This is bad + // for performance since it can cascade through a function, introducing many + // extra spills and fills (e.g. ARM can easily end up copying QQQQPR registers + // around after a few subreg copies). + if (SrcIdx && DstIdx) + return false; + + const TargetRegisterClass *DefRC = TII->getRegClass(MCID, 0, TRI, *MF); if (!DefMI->isImplicitDef()) { - // Make sure the copy destination register class fits the instruction - // definition register class. The mismatch can happen as a result of earlier - // extract_subreg, insert_subreg, subreg_to_reg coalescing. 
-    const TargetRegisterClass *RC = TII->getRegClass(MCID, 0, TRI, *MF);
-    if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
-      if (MRI->getRegClass(DstReg) != RC)
+    if (TargetRegisterInfo::isPhysicalRegister(DstReg)) {
+      unsigned NewDstReg = DstReg;
+
+      unsigned NewDstIdx = TRI->composeSubRegIndices(CP.getSrcIdx(),
+                                              DefMI->getOperand(0).getSubReg());
+      if (NewDstIdx)
+        NewDstReg = TRI->getSubReg(DstReg, NewDstIdx);
+
+      // Finally, make sure that the physical subregister that will be
+      // constructed later is permitted for the instruction.
+      if (!DefRC->contains(NewDstReg))
         return false;
-    } else if (!RC->contains(DstReg))
-      return false;
+    } else {
+      // Theoretically, some stack frame reference could exist. Just make sure
+      // it hasn't actually happened.
+      assert(TargetRegisterInfo::isVirtualRegister(DstReg) &&
+             "Only expect to deal with virtual or physical registers");
+    }
   }
 
   MachineBasicBlock *MBB = CopyMI->getParent();
   MachineBasicBlock::iterator MII =
-    llvm::next(MachineBasicBlock::iterator(CopyMI));
-  TII->reMaterialize(*MBB, MII, DstReg, 0, DefMI, *TRI);
-  MachineInstr *NewMI = prior(MII);
+    std::next(MachineBasicBlock::iterator(CopyMI));
+  TII->reMaterialize(*MBB, MII, DstReg, SrcIdx, DefMI, *TRI);
+  MachineInstr *NewMI = std::prev(MII);
+
+  LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI);
+  CopyMI->eraseFromParent();
+  ErasedInstrs.insert(CopyMI);
 
   // NewMI may have dead implicit defs (E.g. EFLAGS for MOVr0 on X86).
   // We need to remember these so we can add intervals once we insert
@@ -760,6 +822,56 @@ bool RegisterCoalescer::reMaterializeTrivialDef(LiveInterval &SrcInt,
     }
   }
 
+  if (TargetRegisterInfo::isVirtualRegister(DstReg)) {
+    const TargetRegisterClass *NewRC = CP.getNewRC();
+    unsigned NewIdx = NewMI->getOperand(0).getSubReg();
+
+    if (NewIdx)
+      NewRC = TRI->getMatchingSuperRegClass(NewRC, DefRC, NewIdx);
+    else
+      NewRC = TRI->getCommonSubClass(NewRC, DefRC);
+
+    assert(NewRC && "subreg chosen for remat incompatible with instruction");
+    MRI->setRegClass(DstReg, NewRC);
+
+    updateRegDefsUses(DstReg, DstReg, DstIdx);
+    NewMI->getOperand(0).setSubReg(NewIdx);
+  } else if (NewMI->getOperand(0).getReg() != CopyDstReg) {
+    // The New instruction may be defining a sub-register of what's actually
+    // been asked for. If so it must implicitly define the whole thing.
+    assert(TargetRegisterInfo::isPhysicalRegister(DstReg) &&
+           "Only expect virtual or physical registers in remat");
+    NewMI->getOperand(0).setIsDead(true);
+    NewMI->addOperand(MachineOperand::CreateReg(CopyDstReg,
+                                                true  /*IsDef*/,
+                                                true  /*IsImp*/,
+                                                false /*IsKill*/));
+    // Record small dead def live-ranges for all the subregisters
+    // of the destination register.
+    // Otherwise, variables that live through may miss some
+    // interferences, thus creating invalid allocation.
+    // E.g., i386 code:
+    // vreg1 = somedef ; vreg1 GR8
+    // vreg2 = remat ; vreg2 GR32
+    // CL = COPY vreg2.sub_8bit
+    // = somedef vreg1 ; vreg1 GR8
+    // =>
+    // vreg1 = somedef ; vreg1 GR8
+    // ECX<def, dead> = remat ; CL<imp-def>
+    // = somedef vreg1 ; vreg1 GR8
+    // vreg1 will see the interferences with CL but not with CH since
+    // no live-ranges would have been created for ECX.
+    // Fix that!
+    SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
+    for (MCRegUnitIterator Units(NewMI->getOperand(0).getReg(), TRI);
+         Units.isValid(); ++Units)
+      if (LiveRange *LR = LIS->getCachedRegUnit(*Units))
+        LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
+  }
+
+  if (NewMI->getOperand(0).getSubReg())
+    NewMI->getOperand(0).setIsUndef();
+
   // CopyMI may have implicit operands, transfer them over to the newly
   // rematerialized instruction. And update implicit def interval valnos.
   for (unsigned i = CopyMI->getDesc().getNumOperands(),
@@ -774,18 +886,14 @@ bool RegisterCoalescer::reMaterializeTrivialDef(LiveInterval &SrcInt,
     }
   }
 
-  LIS->ReplaceMachineInstrInMaps(CopyMI, NewMI);
-
   SlotIndex NewMIIdx = LIS->getInstructionIndex(NewMI);
   for (unsigned i = 0, e = NewMIImplDefs.size(); i != e; ++i) {
     unsigned Reg = NewMIImplDefs[i];
     for (MCRegUnitIterator Units(Reg, TRI); Units.isValid(); ++Units)
-      if (LiveInterval *LI = LIS->getCachedRegUnit(*Units))
-        LI->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
+      if (LiveRange *LR = LIS->getCachedRegUnit(*Units))
+        LR->createDeadDef(NewMIIdx.getRegSlot(), LIS->getVNInfoAllocator());
   }
 
-  CopyMI->eraseFromParent();
-  ErasedInstrs.insert(CopyMI);
   DEBUG(dbgs() << "Remat: " << *NewMI);
   ++NumReMats;
@@ -817,17 +925,14 @@ bool RegisterCoalescer::eliminateUndefCopy(MachineInstr *CopyMI,
   // No intervals are live-in to CopyMI - it is undef.
   if (CP.isFlipped())
     DstInt = SrcInt;
-  SrcInt = 0;
+  SrcInt = nullptr;
 
   VNInfo *DeadVNI = DstInt->getVNInfoAt(Idx.getRegSlot());
   assert(DeadVNI && "No value defined in DstInt");
   DstInt->removeValNo(DeadVNI);
 
   // Find new undef uses.
-  for (MachineRegisterInfo::reg_nodbg_iterator
-         I = MRI->reg_nodbg_begin(DstInt->reg), E = MRI->reg_nodbg_end();
-       I != E; ++I) {
-    MachineOperand &MO = I.getOperand();
+  for (MachineOperand &MO : MRI->reg_nodbg_operands(DstInt->reg)) {
     if (MO.isDef() || MO.isUndef())
       continue;
     MachineInstr *MI = MO.getParent();
@@ -849,16 +954,25 @@ void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
                                           unsigned DstReg,
                                           unsigned SubIdx) {
   bool DstIsPhys = TargetRegisterInfo::isPhysicalRegister(DstReg);
-  LiveInterval *DstInt = DstIsPhys ? 0 : &LIS->getInterval(DstReg);
-
-  // Update LiveDebugVariables.
-  LDV->renameRegister(SrcReg, DstReg, SubIdx);
+  LiveInterval *DstInt = DstIsPhys ? nullptr : &LIS->getInterval(DstReg);
+
+  SmallPtrSet<MachineInstr*, 8> Visited;
+  for (MachineRegisterInfo::reg_instr_iterator
+       I = MRI->reg_instr_begin(SrcReg), E = MRI->reg_instr_end();
+       I != E; ) {
+    MachineInstr *UseMI = &*(I++);
+
+    // Each instruction can only be rewritten once because sub-register
+    // composition is not always idempotent. When SrcReg != DstReg, rewriting
+    // the UseMI operands removes them from the SrcReg use-def chain, but when
+    // SrcReg is DstReg we could encounter UseMI twice if it has multiple
+    // operands mentioning the virtual register.
+    if (SrcReg == DstReg && !Visited.insert(UseMI))
+      continue;
+
-  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(SrcReg);
-       MachineInstr *UseMI = I.skipInstruction();) {
     SmallVector<unsigned, 8> Ops;
     bool Reads, Writes;
-    tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
+    std::tie(Reads, Writes) = UseMI->readsWritesVirtualRegister(SrcReg, &Ops);
 
     // If SrcReg wasn't read, it may still be the case that DstReg is live-in
     // because SrcReg is a sub-register.
@@ -891,7 +1005,7 @@ void RegisterCoalescer::updateRegDefsUses(unsigned SrcReg,
 }
 
 /// canJoinPhys - Return true if a copy involving a physreg should be joined.
-bool RegisterCoalescer::canJoinPhys(CoalescerPair &CP) { +bool RegisterCoalescer::canJoinPhys(const CoalescerPair &CP) { /// Always join simple intervals that are defined by a single copy from a /// reserved register. This doesn't increase register pressure, so it is /// always beneficial. @@ -948,7 +1062,7 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) { if (CP.getSrcReg() == CP.getDstReg()) { LiveInterval &LI = LIS->getInterval(CP.getSrcReg()); DEBUG(dbgs() << "\tCopy already coalesced: " << LI << '\n'); - LiveRangeQuery LRQ(LI, LIS->getInstructionIndex(CopyMI)); + LiveQueryResult LRQ = LI.Query(LIS->getInstructionIndex(CopyMI)); if (VNInfo *DefVNI = LRQ.valueDefined()) { VNInfo *ReadVNI = LRQ.valueIn(); assert(ReadVNI && "No value before copy and no flag."); @@ -969,10 +1083,11 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) { if (!canJoinPhys(CP)) { // Before giving up coalescing, if definition of source is defined by // trivial computation, try rematerializing it. - if (!CP.isFlipped() && - reMaterializeTrivialDef(LIS->getInterval(CP.getSrcReg()), - CP.getDstReg(), CopyMI)) + bool IsDefCopy; + if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy)) return true; + if (IsDefCopy) + Again = true; // May be possible to coalesce later. return false; } } else { @@ -990,8 +1105,8 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) { }); // When possible, let DstReg be the larger interval. - if (!CP.isPartial() && LIS->getInterval(CP.getSrcReg()).ranges.size() > - LIS->getInterval(CP.getDstReg()).ranges.size()) + if (!CP.isPartial() && LIS->getInterval(CP.getSrcReg()).size() > + LIS->getInterval(CP.getDstReg()).size()) CP.flip(); } @@ -1004,12 +1119,12 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) { // If definition of source is defined by trivial computation, try // rematerializing it. - if (!CP.isFlipped() && - reMaterializeTrivialDef(LIS->getInterval(CP.getSrcReg()), - CP.getDstReg(), CopyMI)) + bool IsDefCopy; + if (reMaterializeTrivialDef(CP, CopyMI, IsDefCopy)) return true; - // If we can eliminate the copy without merging the live ranges, do so now. + // If we can eliminate the copy without merging the live segments, do so + // now. if (!CP.isPartial() && !CP.isPhys()) { if (adjustCopiesBackFrom(CP, CopyMI) || removeCopyByCommutingDef(CP, CopyMI)) { @@ -1057,10 +1172,12 @@ bool RegisterCoalescer::joinCopy(MachineInstr *CopyMI, bool &Again) { TRI->UpdateRegAllocHint(CP.getSrcReg(), CP.getDstReg(), *MF); DEBUG({ - dbgs() << "\tJoined. Result = " << PrintReg(CP.getDstReg(), TRI); - if (!CP.isPhys()) + dbgs() << "\tJoined. Result = "; + if (CP.isPhys()) + dbgs() << PrintReg(CP.getDstReg(), TRI); + else dbgs() << LIS->getInterval(CP.getDstReg()); - dbgs() << '\n'; + dbgs() << '\n'; }); ++numJoins; @@ -1072,8 +1189,7 @@ bool RegisterCoalescer::joinReservedPhysReg(CoalescerPair &CP) { assert(CP.isPhys() && "Must be a physreg copy"); assert(MRI->isReserved(CP.getDstReg()) && "Not a reserved register"); LiveInterval &RHS = LIS->getInterval(CP.getSrcReg()); - DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS - << '\n'); + DEBUG(dbgs() << "\t\tRHS = " << RHS << '\n'); assert(CP.isFlipped() && RHS.containsOneValue() && "Invalid join with reserved register"); @@ -1241,8 +1357,18 @@ class JoinVals { // Value in the other live range that overlaps this def, if any. VNInfo *OtherVNI; - // Is this value an IMPLICIT_DEF? - bool IsImplicitDef; + // Is this value an IMPLICIT_DEF that can be erased? 
+    //
+    // IMPLICIT_DEF values should only exist at the end of a basic block that
+    // is a predecessor to a phi-value. These IMPLICIT_DEF instructions can be
+    // safely erased if they are overlapping a live value in the other live
+    // interval.
+    //
+    // Weird control flow graphs and incomplete PHI handling in
+    // ProcessImplicitDefs can very rarely create IMPLICIT_DEF values with
+    // longer live ranges. Such IMPLICIT_DEF values should be treated like
+    // normal values.
+    bool ErasableImplicitDef;
 
     // True when the live range of this value will be pruned because of an
     // overlapping CR_Replace value in the other live range.
@@ -1252,8 +1378,8 @@ class JoinVals {
     bool PrunedComputed;
 
     Val() : Resolution(CR_Keep), WriteLanes(0), ValidLanes(0),
-            RedefVNI(0), OtherVNI(0), IsImplicitDef(false), Pruned(false),
-            PrunedComputed(false) {}
+            RedefVNI(nullptr), OtherVNI(nullptr), ErasableImplicitDef(false),
+            Pruned(false), PrunedComputed(false) {}
 
     bool isAnalyzed() const { return WriteLanes != 0; }
   };
@@ -1302,7 +1428,7 @@ public:
                    SmallVectorImpl<unsigned> &ShrinkRegs);
 
   /// Get the value assignments suitable for passing to LiveInterval::join.
-  const int *getAssignments() const { return &Assignments[0]; }
+  const int *getAssignments() const { return Assignments.data(); }
 };
 } // end anonymous namespace
 
@@ -1314,7 +1440,8 @@ unsigned JoinVals::computeWriteLanes(const MachineInstr *DefMI, bool &Redef) {
   for (ConstMIOperands MO(DefMI); MO.isValid(); ++MO) {
     if (!MO->isReg() || MO->getReg() != LI.reg || !MO->isDef())
       continue;
-    L |= TRI->getSubRegIndexLaneMask(compose(*TRI, SubIdx, MO->getSubReg()));
+    L |= TRI->getSubRegIndexLaneMask(
+           TRI->composeSubRegIndices(SubIdx, MO->getSubReg()));
     if (MO->readsReg())
       Redef = true;
   }
@@ -1331,7 +1458,7 @@ VNInfo *JoinVals::stripCopies(VNInfo *VNI) {
     unsigned Reg = MI->getOperand(1).getReg();
     if (!TargetRegisterInfo::isVirtualRegister(Reg))
       break;
-    LiveRangeQuery LRQ(LIS->getInterval(Reg), VNI->def);
+    LiveQueryResult LRQ = LIS->getInterval(Reg).Query(VNI->def);
     if (!LRQ.valueIn())
       break;
     VNI = LRQ.valueIn();
@@ -1357,7 +1484,7 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
   }
 
   // Get the instruction defining this value, compute the lanes written.
-  const MachineInstr *DefMI = 0;
+  const MachineInstr *DefMI = nullptr;
   if (VNI->isPHIDef()) {
     // Conservatively assume that all lanes in a PHI are valid.
     V.ValidLanes = V.WriteLanes = TRI->getSubRegIndexLaneMask(SubIdx);
@@ -1382,7 +1509,7 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
       // The <undef> flag on the def operand means that old lane values are
      // not important.
      if (Redef) {
-        V.RedefVNI = LiveRangeQuery(LI, VNI->def).valueIn();
+        V.RedefVNI = LI.Query(VNI->def).valueIn();
        assert(V.RedefVNI && "Instruction is reading nonexistent value");
        computeAssignment(V.RedefVNI->id, Other);
        V.ValidLanes |= Vals[V.RedefVNI->id].ValidLanes;
@@ -1390,13 +1517,16 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
 
     // An IMPLICIT_DEF writes undef values.
     if (DefMI->isImplicitDef()) {
-      V.IsImplicitDef = true;
+      // We normally expect IMPLICIT_DEF values to be live only until the end
+      // of their block. If the value is really live longer and gets pruned in
+      // another block, this flag is cleared again.
+      V.ErasableImplicitDef = true;
       V.ValidLanes &= ~V.WriteLanes;
     }
   }
 
   // Find the value in Other that overlaps VNI->def, if any.
-  LiveRangeQuery OtherLRQ(Other.LI, VNI->def);
+  LiveQueryResult OtherLRQ = Other.LI.Query(VNI->def);
 
   // It is possible that both values are defined by the same instruction, or
   // the values are PHIs defined in the same block. When that happens, the two
@@ -1443,7 +1573,22 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
   // We have overlapping values, or possibly a kill of Other.
   // Recursively compute assignments up the dominator tree.
   Other.computeAssignment(V.OtherVNI->id, *this);
-  const Val &OtherV = Other.Vals[V.OtherVNI->id];
+  Val &OtherV = Other.Vals[V.OtherVNI->id];
+
+  // Check if OtherV is an IMPLICIT_DEF that extends beyond its basic block.
+  // This shouldn't normally happen, but ProcessImplicitDefs can leave such
+  // IMPLICIT_DEF instructions behind, and there is nothing wrong with it
+  // technically.
+  //
+  // When it happens, treat that IMPLICIT_DEF as a normal value, and don't try
+  // to erase the IMPLICIT_DEF instruction.
+  if (OtherV.ErasableImplicitDef && DefMI &&
+      DefMI->getParent() != Indexes->getMBBFromIndex(V.OtherVNI->def)) {
+    DEBUG(dbgs() << "IMPLICIT_DEF defined at " << V.OtherVNI->def
+                 << " extends into BB#" << DefMI->getParent()->getNumber()
+                 << ", keeping it.\n");
+    OtherV.ErasableImplicitDef = false;
+  }
 
   // Allow overlapping PHI values. Any real interference would show up in a
   // predecessor, the PHI itself can't introduce any conflicts.
@@ -1492,6 +1637,20 @@ JoinVals::analyzeValue(unsigned ValNo, JoinVals &Other) {
   if ((V.WriteLanes & OtherV.ValidLanes) == 0)
     return CR_Replace;
 
+  // If the other live range is killed by DefMI and the live ranges are still
+  // overlapping, it must be because we're looking at an early clobber def:
+  //
+  //   %dst<def,early-clobber> = ASM %src<kill>
+  //
+  // In this case, it is illegal to merge the two live ranges since the early
+  // clobber def would clobber %src before it was read.
+  if (OtherLRQ.isKill()) {
+    // This case where the def doesn't overlap the kill is handled above.
+    assert(VNI->def.isEarlyClobber() &&
+           "Only early clobber defs can overlap a kill");
+    return CR_Impossible;
+  }
+
   // VNI is clobbering live lanes in OtherVNI, but there is still the
   // possibility that no instructions actually read the clobbered lanes.
   // If we're clobbering all the lanes in OtherVNI, at least one must be read.
@@ -1632,8 +1791,8 @@ bool JoinVals::usesLanes(MachineInstr *MI, unsigned Reg, unsigned SubIdx,
       continue;
     if (!MO->readsReg())
      continue;
-    if (Lanes &
-        TRI->getSubRegIndexLaneMask(compose(*TRI, SubIdx, MO->getSubReg())))
+    if (Lanes & TRI->getSubRegIndexLaneMask(
+                  TRI->composeSubRegIndices(SubIdx, MO->getSubReg())))
       return true;
   }
   return false;
@@ -1738,7 +1897,8 @@ void JoinVals::pruneValues(JoinVals &Other,
       // predecessors, so the instruction should simply go away once its value
      // has been replaced.
      Val &OtherV = Other.Vals[Vals[i].OtherVNI->id];
-      bool EraseImpDef = OtherV.IsImplicitDef && OtherV.Resolution == CR_Keep;
+      bool EraseImpDef = OtherV.ErasableImplicitDef &&
+                         OtherV.Resolution == CR_Keep;
      if (!Def.isBlock()) {
        // Remove <def,read-undef> flags. This def is now a partial redef.
        // Also remove <def,dead> flags since the joined live range will
@@ -1787,7 +1947,7 @@ void JoinVals::eraseInstrs(SmallPtrSet<MachineInstr*, 8> &ErasedInstrs,
      // If an IMPLICIT_DEF value is pruned, it doesn't serve a purpose any
      // longer. The IMPLICIT_DEF instructions are only inserted by
      // PHIElimination to guarantee that all PHI predecessors have a value.
-      if (!Vals[i].IsImplicitDef || !Vals[i].Pruned)
+      if (!Vals[i].ErasableImplicitDef || !Vals[i].Pruned)
        break;
      // Remove value number i from LI. Note that this VNInfo is still present
      // in NewVNInfo, so it will appear as an unused value number in the final
@@ -1825,8 +1985,8 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
   JoinVals RHSVals(RHS, CP.getSrcIdx(), NewVNInfo, CP, LIS, TRI);
   JoinVals LHSVals(LHS, CP.getDstIdx(), NewVNInfo, CP, LIS, TRI);
 
-  DEBUG(dbgs() << "\t\tRHS = " << PrintReg(CP.getSrcReg()) << ' ' << RHS
-               << "\n\t\tLHS = " << PrintReg(CP.getDstReg()) << ' ' << LHS
+  DEBUG(dbgs() << "\t\tRHS = " << RHS
+               << "\n\t\tLHS = " << LHS
                << '\n');
 
   // First compute NewVNInfo and the simple value mappings.
@@ -1857,8 +2017,7 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
     LIS->shrinkToUses(&LIS->getInterval(ShrinkRegs.pop_back_val()));
 
   // Join RHS into LHS.
-  LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo,
-           MRI);
+  LHS.join(RHS, LHSVals.getAssignments(), RHSVals.getAssignments(), NewVNInfo);
 
   // Kill flags are going to be wrong if the live ranges were overlapping.
   // Eventually, we should simply clear all kill flags when computing live
@@ -1873,7 +2032,7 @@ bool RegisterCoalescer::joinVirtRegs(CoalescerPair &CP) {
   // CR_Replace conflicts.
   DEBUG(dbgs() << "\t\trestoring liveness to " << EndPoints.size()
                << " points: " << LHS << '\n');
-  LIS->extendToIndices(&LHS, EndPoints);
+  LIS->extendToIndices(LHS, EndPoints);
   return true;
 }
 
@@ -1884,47 +2043,79 @@ bool RegisterCoalescer::joinIntervals(CoalescerPair &CP) {
 }
 
 namespace {
-  // DepthMBBCompare - Comparison predicate that sort first based on the loop
-  // depth of the basic block (the unsigned), and then on the MBB number.
-  struct DepthMBBCompare {
-    typedef std::pair<unsigned, MachineBasicBlock*> DepthMBBPair;
-    bool operator()(const DepthMBBPair &LHS, const DepthMBBPair &RHS) const {
-      // Deeper loops first
-      if (LHS.first != RHS.first)
-        return LHS.first > RHS.first;
-
-      // Prefer blocks that are more connected in the CFG. This takes care of
-      // the most difficult copies first while intervals are short.
-      unsigned cl = LHS.second->pred_size() + LHS.second->succ_size();
-      unsigned cr = RHS.second->pred_size() + RHS.second->succ_size();
-      if (cl != cr)
-        return cl > cr;
-
-      // As a last resort, sort by block number.
-      return LHS.second->getNumber() < RHS.second->getNumber();
-    }
-  };
+// Information concerning MBB coalescing priority.
+struct MBBPriorityInfo {
+  MachineBasicBlock *MBB;
+  unsigned Depth;
+  bool IsSplit;
+
+  MBBPriorityInfo(MachineBasicBlock *mbb, unsigned depth, bool issplit)
+    : MBB(mbb), Depth(depth), IsSplit(issplit) {}
+};
+}
+
+// C-style comparator that sorts first based on the loop depth of the basic
+// block (the unsigned), and then on the MBB number.
+//
+// EnableGlobalCopies assumes that the primary sort key is loop depth.
+static int compareMBBPriority(const MBBPriorityInfo *LHS,
+                              const MBBPriorityInfo *RHS) {
+  // Deeper loops first
+  if (LHS->Depth != RHS->Depth)
+    return LHS->Depth > RHS->Depth ? -1 : 1;
+
+  // Try to unsplit critical edges next.
+  if (LHS->IsSplit != RHS->IsSplit)
+    return LHS->IsSplit ? -1 : 1;
+
+  // Prefer blocks that are more connected in the CFG. This takes care of
+  // the most difficult copies first while intervals are short.
+  unsigned cl = LHS->MBB->pred_size() + LHS->MBB->succ_size();
+  unsigned cr = RHS->MBB->pred_size() + RHS->MBB->succ_size();
+  if (cl != cr)
+    return cl > cr ? -1 : 1;
+
+  // As a last resort, sort by block number.
+  return LHS->MBB->getNumber() < RHS->MBB->getNumber() ? -1 : 1;
+}
+
+/// \returns true if the given copy uses or defines a local live range.
+static bool isLocalCopy(MachineInstr *Copy, const LiveIntervals *LIS) {
+  if (!Copy->isCopy())
+    return false;
+
+  if (Copy->getOperand(1).isUndef())
+    return false;
+
+  unsigned SrcReg = Copy->getOperand(1).getReg();
+  unsigned DstReg = Copy->getOperand(0).getReg();
+  if (TargetRegisterInfo::isPhysicalRegister(SrcReg)
+      || TargetRegisterInfo::isPhysicalRegister(DstReg))
+    return false;
+
+  return LIS->intervalIsInOneMBB(LIS->getInterval(SrcReg))
+    || LIS->intervalIsInOneMBB(LIS->getInterval(DstReg));
 }
 
 // Try joining WorkList copies starting from index From.
 // Null out any successful joins.
-bool RegisterCoalescer::copyCoalesceWorkList(unsigned From) {
-  assert(From <= WorkList.size() && "Out of range");
+bool RegisterCoalescer::
+copyCoalesceWorkList(MutableArrayRef<MachineInstr*> CurrList) {
   bool Progress = false;
-  for (unsigned i = From, e = WorkList.size(); i != e; ++i) {
-    if (!WorkList[i])
+  for (unsigned i = 0, e = CurrList.size(); i != e; ++i) {
+    if (!CurrList[i])
       continue;
     // Skip instruction pointers that have already been erased, for example by
     // dead code elimination.
-    if (ErasedInstrs.erase(WorkList[i])) {
-      WorkList[i] = 0;
+    if (ErasedInstrs.erase(CurrList[i])) {
+      CurrList[i] = nullptr;
      continue;
    }
    bool Again = false;
-    bool Success = joinCopy(WorkList[i], Again);
+    bool Success = joinCopy(CurrList[i], Again);
    Progress |= Success;
    if (Success || !Again)
-      WorkList[i] = 0;
+      CurrList[i] = nullptr;
  }
  return Progress;
}
@@ -1936,52 +2127,74 @@ RegisterCoalescer::copyCoalesceInMBB(MachineBasicBlock *MBB) {
   // Collect all copy-like instructions in MBB. Don't start coalescing anything
   // yet, it might invalidate the iterator.
   const unsigned PrevSize = WorkList.size();
-  for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
-       MII != E; ++MII)
-    if (MII->isCopyLike())
-      WorkList.push_back(MII);
-
+  if (JoinGlobalCopies) {
+    // Coalesce copies bottom-up to coalesce local defs before local uses. They
+    // are not inherently easier to resolve, but slightly preferable until we
+    // have local live range splitting. In particular this is required by
+    // cmp+jmp macro fusion.
+    for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
+         MII != E; ++MII) {
+      if (!MII->isCopyLike())
+        continue;
+      if (isLocalCopy(&(*MII), LIS))
+        LocalWorkList.push_back(&(*MII));
+      else
+        WorkList.push_back(&(*MII));
+    }
+  }
+  else {
+    for (MachineBasicBlock::iterator MII = MBB->begin(), E = MBB->end();
+         MII != E; ++MII)
+      if (MII->isCopyLike())
+        WorkList.push_back(MII);
+  }
   // Try coalescing the collected copies immediately, and remove the nulls.
   // This prevents the WorkList from getting too large since most copies are
   // joinable on the first attempt.
-  if (copyCoalesceWorkList(PrevSize))
+  MutableArrayRef<MachineInstr*>
+    CurrList(WorkList.begin() + PrevSize, WorkList.end());
+  if (copyCoalesceWorkList(CurrList))
     WorkList.erase(std::remove(WorkList.begin() + PrevSize, WorkList.end(),
-                               (MachineInstr*)0), WorkList.end());
+                               (MachineInstr*)nullptr), WorkList.end());
+}
+
+void RegisterCoalescer::coalesceLocals() {
+  copyCoalesceWorkList(LocalWorkList);
+  for (unsigned j = 0, je = LocalWorkList.size(); j != je; ++j) {
+    if (LocalWorkList[j])
+      WorkList.push_back(LocalWorkList[j]);
+  }
+  LocalWorkList.clear();
 }
 
 void RegisterCoalescer::joinAllIntervals() {
   DEBUG(dbgs() << "********** JOINING INTERVALS ***********\n");
-  assert(WorkList.empty() && "Old data still around.");
-
-  if (Loops->empty()) {
-    // If there are no loops in the function, join intervals in function order.
-    for (MachineFunction::iterator I = MF->begin(), E = MF->end();
-         I != E; ++I)
-      copyCoalesceInMBB(I);
-  } else {
-    // Otherwise, join intervals in inner loops before other intervals.
-    // Unfortunately we can't just iterate over loop hierarchy here because
-    // there may be more MBB's than BB's.  Collect MBB's for sorting.
-
-    // Join intervals in the function prolog first. We want to join physical
-    // registers with virtual registers before the intervals got too long.
-    std::vector<std::pair<unsigned, MachineBasicBlock*> > MBBs;
-    for (MachineFunction::iterator I = MF->begin(), E = MF->end();I != E;++I){
-      MachineBasicBlock *MBB = I;
-      MBBs.push_back(std::make_pair(Loops->getLoopDepth(MBB), I));
+  assert(WorkList.empty() && LocalWorkList.empty() && "Old data still around.");
+
+  std::vector<MBBPriorityInfo> MBBs;
+  MBBs.reserve(MF->size());
+  for (MachineFunction::iterator I = MF->begin(), E = MF->end();I != E;++I){
+    MachineBasicBlock *MBB = I;
+    MBBs.push_back(MBBPriorityInfo(MBB, Loops->getLoopDepth(MBB),
+                                   JoinSplitEdges && isSplitEdge(MBB)));
+  }
+  array_pod_sort(MBBs.begin(), MBBs.end(), compareMBBPriority);
+
+  // Coalesce intervals in MBB priority order.
+  unsigned CurrDepth = UINT_MAX;
+  for (unsigned i = 0, e = MBBs.size(); i != e; ++i) {
+    // Try coalescing the collected local copies for deeper loops.
+    if (JoinGlobalCopies && MBBs[i].Depth < CurrDepth) {
+      coalesceLocals();
+      CurrDepth = MBBs[i].Depth;
    }
-
-    // Sort by loop depth.
-    std::sort(MBBs.begin(), MBBs.end(), DepthMBBCompare());
-
-    // Finally, join intervals in loop nest order.
-    for (unsigned i = 0, e = MBBs.size(); i != e; ++i)
-      copyCoalesceInMBB(MBBs[i].second);
+    copyCoalesceInMBB(MBBs[i].MBB);
  }
+  coalesceLocals();
 
  // Joining intervals can allow other intervals to be joined.  Iteratively join
  // until we make no progress.
-  while (copyCoalesceWorkList())
+  while (copyCoalesceWorkList(WorkList))
    /* empty */ ;
}
@@ -1999,10 +2212,20 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) {
   TRI = TM->getRegisterInfo();
   TII = TM->getInstrInfo();
   LIS = &getAnalysis<LiveIntervals>();
-  LDV = &getAnalysis<LiveDebugVariables>();
   AA = &getAnalysis<AliasAnalysis>();
   Loops = &getAnalysis<MachineLoopInfo>();
 
+  const TargetSubtargetInfo &ST = TM->getSubtarget<TargetSubtargetInfo>();
+  if (EnableGlobalCopies == cl::BOU_UNSET)
+    JoinGlobalCopies = ST.useMachineScheduler();
+  else
+    JoinGlobalCopies = (EnableGlobalCopies == cl::BOU_TRUE);
+
+  // The MachineScheduler does not currently require JoinSplitEdges. This will
+  // either be enabled unconditionally or replaced by a more general live range
+  // splitting optimization.
+ JoinSplitEdges = EnableJoinSplits; + DEBUG(dbgs() << "********** SIMPLE REGISTER COALESCING **********\n" << "********** Function: " << MF->getName() << '\n'); @@ -2034,7 +2257,6 @@ bool RegisterCoalescer::runOnMachineFunction(MachineFunction &fn) { } DEBUG(dump()); - DEBUG(LDV->dump()); if (VerifyCoalescing) MF->verify(this, "After register coalescing"); return true;