X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FMachineLICM.cpp;h=e756dedff474733b44283e795d4c140fc0699c35;hb=80cc2598f89d09a6df2b84a5f8cea813b280b17b;hp=8d0e1358863ce144bdb678db105b79e0e4ccb4b3;hpb=f96e4bd2a3b11928af75fb7472288930d16fec0b;p=oota-llvm.git

diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index 8d0e1358863..e756dedff47 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -28,10 +28,10 @@
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetInstrItineraries.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/ADT/DenseMap.h"
@@ -40,9 +40,13 @@
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
-
 using namespace llvm;
 
+static cl::opt<bool>
+AvoidSpeculation("avoid-speculation",
+                 cl::desc("MachineLICM should avoid speculation"),
+                 cl::init(true), cl::Hidden);
+
 STATISTIC(NumHoisted,
           "Number of machine instructions hoisted out of loops");
 STATISTIC(NumLowRP,
@@ -93,6 +97,17 @@ namespace {
     // For each opcode, keep a list of potential CSE instructions.
     DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
 
+    enum {
+      SpeculateFalse   = 0,
+      SpeculateTrue    = 1,
+      SpeculateUnknown = 2
+    };
+
+    // If an MBB does not dominate the loop's exiting blocks, then it may not
+    // be safe to hoist loads from this block.
+    // Tri-state: 0 - false, 1 - true, 2 - unknown
+    unsigned SpeculationState;
+
   public:
     static char ID; // Pass identification, replacement for typeid
     MachineLICM() :
@@ -110,7 +125,6 @@ namespace {
     const char *getPassName() const { return "Machine Instruction LICM"; }
 
     virtual void getAnalysisUsage(AnalysisUsage &AU) const {
-      AU.setPreservesCFG();
       AU.addRequired<MachineLoopInfo>();
       AU.addRequired<MachineDominatorTree>();
       AU.addRequired<AliasAnalysis>();
@@ -171,20 +185,36 @@
     ///
     bool IsLoopInvariantInst(MachineInstr &I);
 
+    /// HasAnyPHIUse - Return true if the specified register is used by any
+    /// phi node.
+    bool HasAnyPHIUse(unsigned Reg) const;
+
     /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
     /// and a use in the current loop, return true if the target considered
    /// it 'high'.
-    bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx, unsigned Reg);
+    bool HasHighOperandLatency(MachineInstr &MI, unsigned DefIdx,
+                               unsigned Reg) const;
 
-    /// IncreaseHighRegPressure - Visit BBs from preheader to current BB, check
-    /// if hoisting an instruction of the given cost matrix can cause high
+    bool IsCheapInstruction(MachineInstr &MI) const;
+
+    /// CanCauseHighRegPressure - Visit BBs from header to current BB,
+    /// check if hoisting an instruction of the given cost matrix can cause high
     /// register pressure.
-    bool IncreaseHighRegPressure(DenseMap<unsigned, int> &Cost);
+    bool CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost);
+
+    /// UpdateBackTraceRegPressure - Traverse the back trace from header to
+    /// the current block and update their register pressures to reflect the
+    /// effect of hoisting MI from the current block to the preheader.
+    void UpdateBackTraceRegPressure(const MachineInstr *MI);
 
     /// IsProfitableToHoist - Return true if it is potentially profitable to
     /// hoist the given loop invariant.
     bool IsProfitableToHoist(MachineInstr &MI);
 
+    /// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
+    /// If not then a load from this mbb may not be safe to hoist.
+    bool IsGuaranteedToExecute(MachineBasicBlock *BB);
+
     /// HoistRegion - Walk the specified region of the CFG (defined by all
     /// blocks dominated by the specified block, and that are in the current
     /// loop) in depth first order w.r.t the DominatorTree. This allows us to
@@ -193,20 +223,21 @@
     ///
     void HoistRegion(MachineDomTreeNode *N, bool IsHeader = false);
 
+    /// getRegisterClassIDAndCost - For a given MI, register, and the operand
+    /// index, return the ID and cost of its representative register class by
+    /// reference.
+    void getRegisterClassIDAndCost(const MachineInstr *MI,
+                                   unsigned Reg, unsigned OpIdx,
+                                   unsigned &RCId, unsigned &RCCost) const;
+
     /// InitRegPressure - Find all virtual register references that are liveout
     /// of the preheader to initialize the starting "register pressure". Note
     /// this does not count live through (livein but not used) registers.
     void InitRegPressure(MachineBasicBlock *BB);
 
-    /// UpdateRegPressureBefore / UpdateRegPressureAfter - Update estimate of
-    /// register pressure before and after executing a specific instruction.
-    void UpdateRegPressureBefore(const MachineInstr *MI,
-                                 SmallVector<unsigned, 4> &Defs);
-    void UpdateRegPressureAfter(SmallVector<unsigned, 4> &Defs);
-
-    /// isLoadFromConstantMemory - Return true if the given instruction is a
-    /// load from constant memory.
-    bool isLoadFromConstantMemory(MachineInstr *MI);
+    /// UpdateRegPressure - Update estimate of register pressure after the
+    /// specified instruction.
+    void UpdateRegPressure(const MachineInstr *MI);
 
     /// ExtractHoistableLoad - Unfold a load from the given MachineInstr if
     /// the load itself could be hoisted. Return the unfolded and hoistable
@@ -226,10 +257,14 @@ namespace {
     bool EliminateCSE(MachineInstr *MI,
           DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);
 
+    /// MayCSE - Return true if the given instruction will be CSE'd if it's
+    /// hoisted out of the loop.
+    bool MayCSE(MachineInstr *MI);
+
     /// Hoist - When an instruction is found to only use loop invariant operands
     /// that are safe to hoist, this instruction is called to do the dirty work.
-    ///
-    void Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
+    /// It returns true if the instruction is hoisted.
+    bool Hoist(MachineInstr *MI, MachineBasicBlock *Preheader);
 
     /// InitCSEMap - Initialize the CSE map with instructions that are in the
     /// current loop preheader that may become duplicates of instructions that
@@ -294,7 +329,7 @@ bool MachineLICM::runOnMachineFunction(MachineFunction &MF) {
     RegLimit.resize(NumRC);
     for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
            E = TRI->regclass_end(); I != E; ++I)
-      RegLimit[(*I)->getID()] = TLI->getRegPressureLimit(*I, MF);
+      RegLimit[(*I)->getID()] = TRI->getRegPressureLimit(*I, MF);
   }
 
   // Get our Loop information...
@@ -438,6 +473,12 @@ void MachineLICM::HoistRegionPostRA() {
   const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
   for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
     MachineBasicBlock *BB = Blocks[i];
+
+    // If the header of the loop containing this basic block is a landing pad,
+    // then don't try to hoist instructions out of this loop.
+    const MachineLoop *ML = MLI->getLoopFor(BB);
+    if (ML && ML->getHeader()->isLandingPad()) continue;
+
     // Conservatively treat live-ins as an external def.
     // FIXME: That means a reload that's reused in successor block(s) will not
     // be LICM'ed.
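Note on the speculation machinery introduced above: IsGuaranteedToExecute reduces to a cached dominance query. A block is "guaranteed to execute" only if it is the loop header or dominates every exiting block of the loop; otherwise hoisting a load out of it would be speculative. The following is a minimal, self-contained sketch of that check with the result cached in a tri-state flag. The Block type and the dominates callback are simplified stand-ins, not the pass's real MachineDominatorTree/MachineLoop interfaces.

#include <functional>
#include <vector>

struct Block {};

enum SpeculationState { SpeculateFalse, SpeculateTrue, SpeculateUnknown };

// 'dominates(A, B)' must answer whether A dominates B in the CFG; the real
// pass asks MachineDominatorTree the same question.
static bool isGuaranteedToExecute(
    const Block *BB, const Block *Header,
    const std::vector<const Block *> &ExitingBlocks,
    const std::function<bool(const Block *, const Block *)> &dominates,
    SpeculationState &State) {
  if (State != SpeculateUnknown)
    return State == SpeculateFalse;   // cached; SpeculateFalse means "no speculation"

  if (BB != Header)
    for (const Block *Exit : ExitingBlocks)
      if (!dominates(BB, Exit)) {
        State = SpeculateTrue;        // hoisting from BB would be speculative
        return false;
      }

  State = SpeculateFalse;             // BB runs on every pass through the loop body
  return true;
}

Caching the answer per block (and resetting it to SpeculateUnknown before each block is scanned, as the hunks below do) keeps the dominance scan from being repeated for every candidate instruction in the same block.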
@@ -449,6 +490,7 @@ void MachineLICM::HoistRegionPostRA() {
         ++PhysRegDefs[*AS];
     }
 
+    SpeculationState = SpeculateUnknown;
     for (MachineBasicBlock::iterator
            MII = BB->begin(), E = BB->end(); MII != E; ++MII) {
       MachineInstr *MI = &*MII;
@@ -542,6 +584,27 @@ void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
   Changed = true;
 }
 
+// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
+// If not then a load from this mbb may not be safe to hoist.
+bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
+  if (SpeculationState != SpeculateUnknown)
+    return SpeculationState == SpeculateFalse;
+
+  if (BB != CurLoop->getHeader()) {
+    // Check loop exiting blocks.
+    SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
+    CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
+    for (unsigned i = 0, e = CurrentLoopExitingBlocks.size(); i != e; ++i)
+      if (!DT->dominates(BB, CurrentLoopExitingBlocks[i])) {
+        SpeculationState = SpeculateTrue;
+        return false;
+      }
+  }
+
+  SpeculationState = SpeculateFalse;
+  return true;
+}
+
 /// HoistRegion - Walk the specified region of the CFG (defined by all blocks
 /// dominated by the specified block, and that are in the current loop) in depth
 /// first order w.r.t the DominatorTree. This allows us to visit definitions
@@ -551,6 +614,11 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
   assert(N != 0 && "Null dominator tree node?");
   MachineBasicBlock *BB = N->getBlock();
 
+  // If the header of the loop containing this basic block is a landing pad,
+  // then don't try to hoist instructions out of this loop.
+  const MachineLoop *ML = MLI->getLoopFor(BB);
+  if (ML && ML->getHeader()->isLandingPad()) return;
+
   // If this subregion is not in the top level loop at all, exit.
   if (!CurLoop->contains(BB))
     return;
@@ -559,7 +627,7 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
     return;
 
   if (IsHeader) {
-    // Compute registers which are liveout of preheader.
+    // Compute registers which are live-in to the loop header.
     RegSeen.clear();
    BackTrace.clear();
     InitRegPressure(Preheader);
@@ -568,17 +636,13 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
   // Remember livein register pressure.
   BackTrace.push_back(RegPressure);
 
-  SmallVector<unsigned, 4> Defs;
+  SpeculationState = SpeculateUnknown;
   for (MachineBasicBlock::iterator
         MII = BB->begin(), E = BB->end(); MII != E; ) {
     MachineBasicBlock::iterator NextMII = MII; ++NextMII;
     MachineInstr *MI = &*MII;
-
-    assert(Defs.empty());
-    UpdateRegPressureBefore(MI, Defs);
-    Hoist(MI, Preheader);
-    UpdateRegPressureAfter(Defs);
-
+    if (!Hoist(MI, Preheader))
+      UpdateRegPressure(MI);
     MII = NextMII;
   }
 
@@ -594,12 +658,44 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
   BackTrace.pop_back();
 }
 
+static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
+  return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
+}
+
+/// getRegisterClassIDAndCost - For a given MI, register, and the operand
+/// index, return the ID and cost of its representative register class.
+void
+MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
+                                       unsigned Reg, unsigned OpIdx,
+                                       unsigned &RCId, unsigned &RCCost) const {
+  const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+  EVT VT = *RC->vt_begin();
+  if (VT == MVT::untyped) {
+    RCId = RC->getID();
+    RCCost = 1;
+  } else {
+    RCId = TLI->getRepRegClassFor(VT)->getID();
+    RCCost = TLI->getRepRegClassCostFor(VT);
+  }
+}
+
 /// InitRegPressure - Find all virtual register references that are liveout of
 /// the preheader to initialize the starting "register pressure". Note this
 /// does not count live through (livein but not used) registers.
 void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
   std::fill(RegPressure.begin(), RegPressure.end(), 0);
 
+  // If the preheader has only a single predecessor and it ends with a
+  // fallthrough or an unconditional branch, then scan its predecessor for live
+  // defs as well. This happens whenever the preheader is created by splitting
+  // the critical edge from the loop predecessor to the loop header.
+  if (BB->pred_size() == 1) {
+    MachineBasicBlock *TBB = 0, *FBB = 0;
+    SmallVector<MachineOperand, 4> Cond;
+    if (!TII->AnalyzeBranch(*BB, TBB, FBB, Cond, false) && Cond.empty())
+      InitRegPressure(*BB->pred_begin());
+  }
+
   for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
        MII != E; ++MII) {
     MachineInstr *MI = &*MII;
@@ -608,70 +704,77 @@ void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
       if (!MO.isReg() || MO.isImplicit())
         continue;
       unsigned Reg = MO.getReg();
-      if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+      if (!TargetRegisterInfo::isVirtualRegister(Reg))
         continue;
 
       bool isNew = RegSeen.insert(Reg);
-      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-      EVT VT = *RC->vt_begin();
-      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+      unsigned RCId, RCCost;
+      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
       if (MO.isDef())
-        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+        RegPressure[RCId] += RCCost;
      else {
-        if (isNew && !MO.isKill())
+        bool isKill = isOperandKill(MO, MRI);
+        if (isNew && !isKill)
           // Haven't seen this, it must be a livein.
-          RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
-        else if (!isNew && MO.isKill())
-          RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+          RegPressure[RCId] += RCCost;
+        else if (!isNew && isKill)
+          RegPressure[RCId] -= RCCost;
       }
     }
   }
 }
 
-/// UpdateRegPressureBefore / UpdateRegPressureAfter - Update estimate of
-/// register pressure before and after executing a specific instruction.
-void MachineLICM::UpdateRegPressureBefore(const MachineInstr *MI,
-                                          SmallVector<unsigned, 4> &Defs) {
-  bool NoImpact = MI->isImplicitDef() || MI->isPHI();
+/// UpdateRegPressure - Update estimate of register pressure after the
+/// specified instruction.
+void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
+  if (MI->isImplicitDef())
+    return;
+
+  SmallVector<unsigned, 4> Defs;
   for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
     const MachineOperand &MO = MI->getOperand(i);
     if (!MO.isReg() || MO.isImplicit())
       continue;
     unsigned Reg = MO.getReg();
-    if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+    if (!TargetRegisterInfo::isVirtualRegister(Reg))
       continue;
 
     bool isNew = RegSeen.insert(Reg);
-    if (NoImpact)
-      continue;
-
     if (MO.isDef())
       Defs.push_back(Reg);
-    else {
-      if (!isNew && MO.isKill()) {
-        const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-        EVT VT = *RC->vt_begin();
-        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-        unsigned RCCost = TLI->getRepRegClassCostFor(VT);
-
-        assert(RCCost <= RegPressure[RCId]);
+    else if (!isNew && isOperandKill(MO, MRI)) {
+      unsigned RCId, RCCost;
+      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
+      if (RCCost > RegPressure[RCId])
+        RegPressure[RCId] = 0;
+      else
        RegPressure[RCId] -= RCCost;
-      }
     }
   }
-}
 
-void MachineLICM::UpdateRegPressureAfter(SmallVector<unsigned, 4> &Defs) {
+  unsigned Idx = 0;
   while (!Defs.empty()) {
     unsigned Reg = Defs.pop_back_val();
-    RegSeen.insert(Reg);
-    const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-    EVT VT = *RC->vt_begin();
-    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-    unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+    unsigned RCId, RCCost;
+    getRegisterClassIDAndCost(MI, Reg, Idx, RCId, RCCost);
     RegPressure[RCId] += RCCost;
+    ++Idx;
+  }
+}
+
+/// isLoadFromGOTOrConstantPool - Return true if this machine instruction
+/// loads from global offset table or constant pool.
+static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
+  assert (MI.getDesc().mayLoad() && "Expected MI that loads!");
+  for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
+         E = MI.memoperands_end(); I != E; ++I) {
+    if (const Value *V = (*I)->getValue()) {
+      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
+        if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
+          return true;
+    }
   }
+  return false;
 }
 
 /// IsLICMCandidate - Returns true if the instruction may be a suitable
@@ -682,7 +785,17 @@ bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
   bool DontMoveAcrossStore = true;
   if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
     return false;
-
+
+  // If it is a load, then check if it is guaranteed to execute by making sure
+  // that it dominates all exiting blocks. If it doesn't, then there is a path
+  // out of the loop which does not execute this load, so we can't hoist it.
+  // Loads from constant memory are not safe to speculate all the time, for
+  // example an indexed load from a jump table.
+  // Stores and side effects are already checked by isSafeToMove.
+  if (I.getDesc().mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
+      !IsGuaranteedToExecute(I.getParent()))
+    return false;
+
   return true;
 }
 
@@ -752,48 +865,38 @@ bool MachineLICM::IsLoopInvariantInst(MachineInstr &I) {
 }
 
-/// HasPHIUses - Return true if the specified register has any PHI use.
-static bool HasPHIUses(unsigned Reg, MachineRegisterInfo *MRI) {
+/// HasAnyPHIUse - Return true if the specified register is used by any
+/// phi node.
+bool MachineLICM::HasAnyPHIUse(unsigned Reg) const {
   for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(Reg),
          UE = MRI->use_end(); UI != UE; ++UI) {
     MachineInstr *UseMI = &*UI;
     if (UseMI->isPHI())
       return true;
+    // Look past copies as well.
+    if (UseMI->isCopy()) {
+      unsigned Def = UseMI->getOperand(0).getReg();
+      if (TargetRegisterInfo::isVirtualRegister(Def) &&
+          HasAnyPHIUse(Def))
+        return true;
+    }
   }
 
   return false;
 }
 
-/// isLoadFromConstantMemory - Return true if the given instruction is a
-/// load from constant memory. Machine LICM will hoist these even if they are
-/// not re-materializable.
-bool MachineLICM::isLoadFromConstantMemory(MachineInstr *MI) {
-  if (!MI->getDesc().mayLoad()) return false;
-  if (!MI->hasOneMemOperand()) return false;
-  MachineMemOperand *MMO = *MI->memoperands_begin();
-  if (MMO->isVolatile()) return false;
-  if (!MMO->getValue()) return false;
-  const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(MMO->getValue());
-  if (PSV) {
-    MachineFunction &MF = *MI->getParent()->getParent();
-    return PSV->isConstant(MF.getFrameInfo());
-  } else {
-    return AA->pointsToConstantMemory(AliasAnalysis::Location(MMO->getValue(),
-                                                              MMO->getSize(),
-                                                              MMO->getTBAAInfo()));
-  }
-}
-
 /// HasHighOperandLatency - Compute operand latency between a def of 'Reg'
 /// and a use in the current loop, return true if the target considered
 /// it 'high'.
 bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
-                                        unsigned DefIdx, unsigned Reg) {
-  if (MRI->use_nodbg_empty(Reg))
+                                        unsigned DefIdx, unsigned Reg) const {
+  if (!InstrItins || InstrItins->isEmpty() || MRI->use_nodbg_empty(Reg))
     return false;
 
   for (MachineRegisterInfo::use_nodbg_iterator I = MRI->use_nodbg_begin(Reg),
         E = MRI->use_nodbg_end(); I != E; ++I) {
     MachineInstr *UseMI = &*I;
+    if (UseMI->isCopyLike())
+      continue;
     if (!CurLoop->contains(UseMI->getParent()))
       continue;
     for (unsigned i = 0, e = UseMI->getNumOperands(); i != e; ++i) {
@@ -815,31 +918,99 @@ bool MachineLICM::HasHighOperandLatency(MachineInstr &MI,
   return false;
 }
 
-/// IncreaseHighRegPressure - Visit BBs from preheader to current BB, check
+/// IsCheapInstruction - Return true if the instruction is marked "cheap" or
+/// the operand latency between its def and a use is one or less.
+bool MachineLICM::IsCheapInstruction(MachineInstr &MI) const {
+  if (MI.getDesc().isAsCheapAsAMove() || MI.isCopyLike())
+    return true;
+  if (!InstrItins || InstrItins->isEmpty())
+    return false;
+
+  bool isCheap = false;
+  unsigned NumDefs = MI.getDesc().getNumDefs();
+  for (unsigned i = 0, e = MI.getNumOperands(); NumDefs && i != e; ++i) {
+    MachineOperand &DefMO = MI.getOperand(i);
+    if (!DefMO.isReg() || !DefMO.isDef())
+      continue;
+    --NumDefs;
+    unsigned Reg = DefMO.getReg();
+    if (TargetRegisterInfo::isPhysicalRegister(Reg))
+      continue;
+
+    if (!TII->hasLowDefLatency(InstrItins, &MI, i))
+      return false;
+    isCheap = true;
+  }
+
+  return isCheap;
+}
+
+/// CanCauseHighRegPressure - Visit BBs from header to current BB, check
 /// if hoisting an instruction of the given cost matrix can cause high
 /// register pressure.
-bool MachineLICM::IncreaseHighRegPressure(DenseMap<unsigned, int> &Cost) {
-  for (unsigned i = BackTrace.size(); i != 0; --i) {
-    bool AnyIncrease = false;
-    SmallVector<unsigned, 8> &RP = BackTrace[i-1];
-    for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
-         CI != CE; ++CI) {
-      if (CI->second <= 0)
-        continue;
-      AnyIncrease = true;
-      unsigned RCId = CI->first;
+bool MachineLICM::CanCauseHighRegPressure(DenseMap<unsigned, int> &Cost) {
+  for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
       CI != CE; ++CI) {
+    if (CI->second <= 0)
+      continue;
+
+    unsigned RCId = CI->first;
+    for (unsigned i = BackTrace.size(); i != 0; --i) {
+      SmallVector<unsigned, 8> &RP = BackTrace[i-1];
       if (RP[RCId] + CI->second >= RegLimit[RCId])
         return true;
     }
-
-    if (!AnyIncrease)
-      // Hoisting the instruction doesn't increase register pressure.
-      return false;
   }
 
   return false;
 }
 
+/// UpdateBackTraceRegPressure - Traverse the back trace from header to the
+/// current block and update their register pressures to reflect the effect
+/// of hoisting MI from the current block to the preheader.
+void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
+  if (MI->isImplicitDef())
+    return;
+
+  // First compute the 'cost' of the instruction, i.e. its contribution
+  // to register pressure.
+  DenseMap<unsigned, int> Cost;
+  for (unsigned i = 0, e = MI->getDesc().getNumOperands(); i != e; ++i) {
+    const MachineOperand &MO = MI->getOperand(i);
+    if (!MO.isReg() || MO.isImplicit())
+      continue;
+    unsigned Reg = MO.getReg();
+    if (!TargetRegisterInfo::isVirtualRegister(Reg))
+      continue;
+
+    unsigned RCId, RCCost;
+    getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
+    if (MO.isDef()) {
+      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+      if (CI != Cost.end())
+        CI->second += RCCost;
+      else
+        Cost.insert(std::make_pair(RCId, RCCost));
+    } else if (isOperandKill(MO, MRI)) {
+      DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
+      if (CI != Cost.end())
+        CI->second -= RCCost;
+      else
+        Cost.insert(std::make_pair(RCId, -RCCost));
+    }
+  }
+
+  // Update register pressure of blocks from loop header to current block.
+  for (unsigned i = 0, e = BackTrace.size(); i != e; ++i) {
+    SmallVector<unsigned, 8> &RP = BackTrace[i];
+    for (DenseMap<unsigned, int>::iterator CI = Cost.begin(), CE = Cost.end();
         CI != CE; ++CI) {
+      unsigned RCId = CI->first;
+      RP[RCId] += CI->second;
+    }
+  }
+}
+
 /// IsProfitableToHoist - Return true if it is potentially profitable to hoist
 /// the given loop invariant.
 bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
@@ -854,7 +1025,7 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
   // trade off is it may cause spill in high pressure situation. It will end up
   // adding a store in the loop preheader. But the reload is no more expensive.
   // The side benefit is these loads are frequently CSE'ed.
-  if (MI.getDesc().isAsCheapAsAMove()) {
+  if (IsCheapInstruction(MI)) {
     if (!TII->isTriviallyReMaterializable(&MI, AA))
       return false;
   } else {
@@ -862,40 +1033,34 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
     // In low register pressure situation, we can be more aggressive about
     // hoisting. Also, favors hoisting long latency instructions even in
     // moderately high pressure situation.
+    // FIXME: If there are long latency loop-invariant instructions inside the
+    // loop at this point, why didn't the optimizer's LICM hoist them?
    DenseMap<unsigned, int> Cost;
    for (unsigned i = 0, e = MI.getDesc().getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI.getOperand(i);
      if (!MO.isReg() || MO.isImplicit())
        continue;
      unsigned Reg = MO.getReg();
-      if (!Reg || TargetRegisterInfo::isPhysicalRegister(Reg))
+      if (!TargetRegisterInfo::isVirtualRegister(Reg))
        continue;
+
+      unsigned RCId, RCCost;
+      getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
      if (MO.isDef()) {
        if (HasHighOperandLatency(MI, i, Reg)) {
          ++NumHighLatency;
          return true;
        }
 
-        const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-        EVT VT = *RC->vt_begin();
-        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-        unsigned RCCost = TLI->getRepRegClassCostFor(VT);
        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
-        // If the instruction is not register pressure neutral (or better),
-        // check if hoisting it will cause high register pressure in BB's
-        // leading up to this point.
        if (CI != Cost.end())
          CI->second += RCCost;
        else
          Cost.insert(std::make_pair(RCId, RCCost));
-      } else if (MO.isKill()) {
+      } else if (isOperandKill(MO, MRI)) {
        // If a virtual register use is a kill, hoisting it out of the loop
        // may actually reduce register pressure or be register pressure
-        // neutral
-        const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-        EVT VT = *RC->vt_begin();
-        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-        unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+        // neutral.
        DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
        if (CI != Cost.end())
          CI->second -= RCCost;
@@ -904,28 +1069,34 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
      }
    }
 
-    // Visit BBs from preheader to current BB, if hoisting this doesn't cause
+    // Visit BBs from header to current BB, if hoisting this doesn't cause
    // high register pressure, then it's safe to proceed.
-    if (!IncreaseHighRegPressure(Cost)) {
+    if (!CanCauseHighRegPressure(Cost)) {
      ++NumLowRP;
      return true;
    }
 
+    // Do not "speculate" in high register pressure situation. If an
+    // instruction is not guaranteed to be executed in the loop, it's best to be
+    // conservative.
+    if (AvoidSpeculation &&
+        (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI)))
+      return false;
+
    // High register pressure situation, only hoist if the instruction is going to
    // be remat'ed.
    if (!TII->isTriviallyReMaterializable(&MI, AA) &&
-        !isLoadFromConstantMemory(&MI))
+        !MI.isInvariantLoad(AA))
      return false;
  }
 
-  // If result(s) of this instruction is used by PHIs, then don't hoist it.
-  // The presence of joins makes it difficult for current register allocator
-  // implementation to perform remat.
+  // If result(s) of this instruction is used by PHIs outside of the loop, then
+  // don't hoist the instruction because it will introduce an extra copy.
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isReg() || !MO.isDef())
      continue;
-    if (HasPHIUses(MO.getReg(), MRI))
+    if (HasAnyPHIUse(MO.getReg()))
      return false;
  }
 
@@ -940,7 +1111,7 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
  // If not, we may be able to unfold a load and hoist that.
  // First test whether the instruction is loading from an amenable
  // memory location.
-  if (!isLoadFromConstantMemory(MI))
+  if (!MI->isInvariantLoad(AA))
    return 0;
 
  // Next determine the register class for a temporary register.
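The profitability logic above funnels into CanCauseHighRegPressure: for each register class whose net cost from hoisting is positive, every recorded pressure estimate on the path from the loop header down to the current block is compared against that class's limit. A rough stand-alone approximation using plain standard containers follows; the real pass uses DenseMap, SmallVector, and limits supplied by TargetRegisterInfo::getRegPressureLimit, so this is only an illustration of the scan, not the actual implementation.

#include <map>
#include <vector>

// Pressure[b][rc] = estimated pressure of register class 'rc' in the b-th
// block on the path from the loop header to the current block.
// Limit[rc] = pressure limit for that class.
static bool canCauseHighRegPressure(const std::map<unsigned, int> &Cost,
                                    const std::vector<std::vector<unsigned> > &Pressure,
                                    const std::vector<unsigned> &Limit) {
  for (std::map<unsigned, int>::const_iterator CI = Cost.begin(),
         CE = Cost.end(); CI != CE; ++CI) {
    if (CI->second <= 0)
      continue;                      // hoisting leaves this class neutral or better
    unsigned RCId = CI->first;
    for (std::size_t b = 0, e = Pressure.size(); b != e; ++b)
      if (Pressure[b][RCId] + CI->second >= Limit[RCId])
        return true;                 // some block on the path would go over budget
  }
  return false;
}

When the check fails (i.e. hoisting is still performed), UpdateBackTraceRegPressure then adds the same per-class cost to every recorded pressure vector, which is what keeps later queries consistent with earlier hoists.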
@@ -951,9 +1122,9 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
                                    /*UnfoldStore=*/false,
                                    &LoadRegIndex);
  if (NewOpc == 0) return 0;
-  const TargetInstrDesc &TID = TII->get(NewOpc);
-  if (TID.getNumDefs() != 1) return 0;
-  const TargetRegisterClass *RC = TID.OpInfo[LoadRegIndex].getRegClass(TRI);
+  const MCInstrDesc &MID = TII->get(NewOpc);
+  if (MID.getNumDefs() != 1) return 0;
+  const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
  // Ok, we're unfolding. Create a temporary register and do the unfold.
  unsigned Reg = MRI->createVirtualRegister(RC);
@@ -979,6 +1150,10 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
    NewMIs[1]->eraseFromParent();
    return 0;
  }
+
+  // Update register pressure for the unfolded instruction.
+  UpdateRegPressure(NewMIs[1]);
+
  // Otherwise we successfully unfolded a load that we can hoist.
  MI->eraseFromParent();
  return NewMIs[0];
@@ -987,20 +1162,15 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
 void MachineLICM::InitCSEMap(MachineBasicBlock *BB) {
  for (MachineBasicBlock::iterator I = BB->begin(),E = BB->end(); I != E; ++I) {
    const MachineInstr *MI = &*I;
-    // FIXME: For now, only hoist re-materializable instructions. LICM will
-    // increase register pressure. We want to make sure it doesn't increase
-    // spilling.
-    if (TII->isTriviallyReMaterializable(MI, AA)) {
-      unsigned Opcode = MI->getOpcode();
-      DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
-        CI = CSEMap.find(Opcode);
-      if (CI != CSEMap.end())
-        CI->second.push_back(MI);
-      else {
-        std::vector<const MachineInstr*> CSEMIs;
-        CSEMIs.push_back(MI);
-        CSEMap.insert(std::make_pair(Opcode, CSEMIs));
-      }
+    unsigned Opcode = MI->getOpcode();
+    DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
+      CI = CSEMap.find(Opcode);
+    if (CI != CSEMap.end())
+      CI->second.push_back(MI);
+    else {
+      std::vector<const MachineInstr*> CSEMIs;
+      CSEMIs.push_back(MI);
+      CSEMap.insert(std::make_pair(Opcode, CSEMIs));
    }
  }
 }
 
@@ -1010,7 +1180,7 @@ MachineLICM::LookForDuplicate(const MachineInstr *MI,
                              std::vector<const MachineInstr*> &PrevMIs) {
  for (unsigned i = 0, e = PrevMIs.size(); i != e; ++i) {
    const MachineInstr *PrevMI = PrevMIs[i];
-    if (TII->produceSameValue(MI, PrevMI))
+    if (TII->produceSameValue(MI, PrevMI, (PreRegAlloc ? MRI : 0)))
      return PrevMI;
  }
  return 0;
 }
 
@@ -1028,6 +1198,7 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
 
    // Replace virtual registers defined by MI by their counterparts defined
    // by Dup.
+    SmallVector<unsigned, 2> Defs;
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = MI->getOperand(i);
 
@@ -1038,11 +1209,33 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
             "Instructions with different phys regs are not identical!");
 
      if (MO.isReg() && MO.isDef() &&
-          !TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
-        MRI->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
-        MRI->clearKillFlags(Dup->getOperand(i).getReg());
+          !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+        Defs.push_back(i);
+    }
+
+    SmallVector<const TargetRegisterClass*, 2> OrigRCs;
+    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+      unsigned Idx = Defs[i];
+      unsigned Reg = MI->getOperand(Idx).getReg();
+      unsigned DupReg = Dup->getOperand(Idx).getReg();
+      OrigRCs.push_back(MRI->getRegClass(DupReg));
+
+      if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
+        // Restore old RCs if there is more than one def.
+        for (unsigned j = 0; j != i; ++j)
+          MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
+        return false;
      }
    }
+
+    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+      unsigned Idx = Defs[i];
+      unsigned Reg = MI->getOperand(Idx).getReg();
+      unsigned DupReg = Dup->getOperand(Idx).getReg();
+      MRI->replaceRegWith(Reg, DupReg);
+      MRI->clearKillFlags(DupReg);
+    }
+
    MI->eraseFromParent();
    ++NumCSEed;
    return true;
@@ -1050,15 +1243,29 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
  return false;
 }
 
+/// MayCSE - Return true if the given instruction will be CSE'd if it's
+/// hoisted out of the loop.
+bool MachineLICM::MayCSE(MachineInstr *MI) {
+  unsigned Opcode = MI->getOpcode();
+  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
+    CI = CSEMap.find(Opcode);
+  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
+  // the undef property onto uses.
+  if (CI == CSEMap.end() || MI->isImplicitDef())
+    return false;
+
+  return LookForDuplicate(MI, CI->second) != 0;
+}
+
 /// Hoist - When an instruction is found to use only loop invariant operands
 /// that are safe to hoist, this instruction is called to do the dirty work.
 ///
-void MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
+bool MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
  // First check whether we should hoist this instruction.
  if (!IsLoopInvariantInst(*MI) || !IsProfitableToHoist(*MI)) {
    // If not, try unfolding a hoistable load.
    MI = ExtractHoistableLoad(MI);
-    if (!MI) return;
+    if (!MI) return false;
  }
 
  // Now move the instruction to the predecessor, inserting it before any
@@ -1089,6 +1296,9 @@ void MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
  // Otherwise, splice the instruction to the preheader.
  Preheader->splice(Preheader->getFirstTerminator(),MI->getParent(),MI);
 
+  // Update register pressure for BBs from header to this block.
+  UpdateBackTraceRegPressure(MI);
+
  // Clear the kill flags of any register this instruction defines,
  // since they may need to be live throughout the entire loop
  // rather than just live for part of it.
@@ -1110,6 +1320,8 @@ void MachineLICM::Hoist(MachineInstr *MI, MachineBasicBlock *Preheader) {
 
  ++NumHoisted;
  Changed = true;
+
+  return true;
 }
 
 MachineBasicBlock *MachineLICM::getCurPreheader() {
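The reworked EliminateCSE above is a two-phase update: it first records every virtual def and tries to constrain the surviving instruction's register classes, restoring any classes it already changed if one constraint fails, and only then rewrites uses and clears kill flags. A simplified model of that constrain-then-commit pattern is sketched below; the register-class table and the compatibility rule are placeholders for illustration only, not MachineRegisterInfo's actual constrainRegClass semantics.

#include <cstddef>
#include <vector>

// Toy register-class table: RegClass[vreg] is a class id, with 0 meaning
// "unconstrained". Constraining succeeds only if the classes are compatible.
static bool constrainClass(std::vector<unsigned> &RegClass,
                           unsigned DupReg, unsigned Reg) {
  unsigned &DupRC = RegClass[DupReg];
  unsigned WantRC = RegClass[Reg];
  if (DupRC == 0 || DupRC == WantRC) {
    DupRC = WantRC;
    return true;
  }
  return false;                        // incompatible; caller must roll back
}

// Phase 1: constrain every def of the surviving instruction, undoing earlier
// changes on failure. Phase 2 (the commit) would rewrite uses of MIDefs[i]
// to DupDefs[i] and clear kill flags, as the real pass does.
static bool replaceDefsIfCompatible(const std::vector<unsigned> &MIDefs,
                                    const std::vector<unsigned> &DupDefs,
                                    std::vector<unsigned> &RegClass) {
  std::vector<unsigned> OrigRCs;
  for (std::size_t i = 0; i != MIDefs.size(); ++i) {
    OrigRCs.push_back(RegClass[DupDefs[i]]);
    if (!constrainClass(RegClass, DupDefs[i], MIDefs[i])) {
      for (std::size_t j = 0; j != i; ++j)   // restore classes already changed
        RegClass[DupDefs[j]] = OrigRCs[j];
      return false;
    }
  }
  return true;                         // safe to commit the replacement
}

Splitting the work this way means a failed constraint on the last def cannot leave earlier defs with narrowed register classes, which is exactly what the OrigRCs rollback loop in the diff guards against.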