diff --git a/lib/CodeGen/MachineLICM.cpp b/lib/CodeGen/MachineLICM.cpp
index cfd0325ee9a..e756dedff47 100644
--- a/lib/CodeGen/MachineLICM.cpp
+++ b/lib/CodeGen/MachineLICM.cpp
@@ -28,19 +28,25 @@
 #include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/PseudoSourceValue.h"
+#include "llvm/MC/MCInstrItineraries.h"
 #include "llvm/Target/TargetLowering.h"
 #include "llvm/Target/TargetRegisterInfo.h"
 #include "llvm/Target/TargetInstrInfo.h"
-#include "llvm/Target/TargetInstrItineraries.h"
 #include "llvm/Target/TargetMachine.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/ADT/DenseMap.h"
 #include "llvm/ADT/SmallSet.h"
 #include "llvm/ADT/Statistic.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/raw_ostream.h"
 using namespace llvm;
 
+static cl::opt<bool>
+AvoidSpeculation("avoid-speculation",
+                 cl::desc("MachineLICM should avoid speculation"),
+                 cl::init(true), cl::Hidden);
+
 STATISTIC(NumHoisted,
           "Number of machine instructions hoisted out of loops");
 STATISTIC(NumLowRP,
@@ -91,6 +97,17 @@ namespace {
     // For each opcode, keep a list of potential CSE instructions.
     DenseMap<unsigned, std::vector<const MachineInstr*> > CSEMap;
 
+    enum {
+      SpeculateFalse   = 0,
+      SpeculateTrue    = 1,
+      SpeculateUnknown = 2
+    };
+
+    // If an MBB does not dominate the loop's exiting blocks, then it may not
+    // be safe to hoist loads from this block.
+    // Tri-state: 0 - false, 1 - true, 2 - unknown
+    unsigned SpeculationState;
+
   public:
     static char ID; // Pass identification, replacement for typeid
     MachineLICM() :
@@ -194,6 +211,10 @@ namespace {
     /// hoist the given loop invariant.
    bool IsProfitableToHoist(MachineInstr &MI);
 
+    /// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
+    /// If not, then a load from this mbb may not be safe to hoist.
+    bool IsGuaranteedToExecute(MachineBasicBlock *BB);
+
     /// HoistRegion - Walk the specified region of the CFG (defined by all
     /// blocks dominated by the specified block, and that are in the current
     /// loop) in depth first order w.r.t the DominatorTree. This allows us to
@@ -202,6 +223,13 @@ namespace {
     ///
     void HoistRegion(MachineDomTreeNode *N, bool IsHeader = false);
 
+    /// getRegisterClassIDAndCost - For a given MI, register, and the operand
+    /// index, return the ID and cost of its representative register class by
+    /// reference.
+    void getRegisterClassIDAndCost(const MachineInstr *MI,
+                                   unsigned Reg, unsigned OpIdx,
+                                   unsigned &RCId, unsigned &RCCost) const;
+
     /// InitRegPressure - Find all virtual register references that are liveout
     /// of the preheader to initialize the starting "register pressure". Note
     /// this does not count live through (livein but not used) registers.
@@ -229,6 +257,10 @@ namespace {
     bool EliminateCSE(MachineInstr *MI,
           DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator &CI);
 
+    /// MayCSE - Return true if the given instruction will be CSE'd if it's
+    /// hoisted out of the loop.
+    bool MayCSE(MachineInstr *MI);
+
     /// Hoist - When an instruction is found to only use loop invariant operands
     /// that is safe to hoist, this instruction is called to do the dirty work.
     /// It returns true if the instruction is hoisted.
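
The members added in the hunks above (the Speculate* enum and SpeculationState) cache, per basic block, whether the block is guaranteed to execute on every loop iteration; IsGuaranteedToExecute, added in a later hunk, computes the answer lazily from dominance of the loop's exiting blocks, and the hoisting loops reset the cache each time they enter a new block. The standalone sketch below only illustrates that reset-then-lazily-compute pattern: Block and Pass are simplified stand-ins, not the LLVM classes (MachineDominatorTree, MachineLoop) the pass really queries.

// Illustrative sketch, not part of the patch: the tri-state, per-block cache.
#include <iostream>

namespace sketch {

struct Block {
  bool DominatesAllExits; // stand-in for the DT->dominates(BB, ExitingBB) loop
  bool IsHeader;          // stand-in for BB == CurLoop->getHeader()
};

class Pass {
  enum { SpeculateFalse = 0, SpeculateTrue = 1, SpeculateUnknown = 2 };
  unsigned SpeculationState = SpeculateUnknown;

public:
  // Mirrors the "SpeculationState = SpeculateUnknown;" resets done before
  // scanning each block's instructions.
  void enterBlock() { SpeculationState = SpeculateUnknown; }

  // Computed on the first query in a block, then cached, so every later
  // hoisting candidate in the same block reuses the answer.
  bool isGuaranteedToExecute(const Block &BB) {
    if (SpeculationState != SpeculateUnknown)
      return SpeculationState == SpeculateFalse;
    if (!BB.IsHeader && !BB.DominatesAllExits) {
      SpeculationState = SpeculateTrue;  // hoisting from here would speculate
      return false;
    }
    SpeculationState = SpeculateFalse;   // guaranteed to execute
    return true;
  }
};

} // namespace sketch

int main() {
  sketch::Pass P;
  sketch::Block Body = {/*DominatesAllExits=*/false, /*IsHeader=*/false};
  P.enterBlock();
  std::cout << P.isGuaranteedToExecute(Body) << "\n"; // 0: not guaranteed
  std::cout << P.isGuaranteedToExecute(Body) << "\n"; // 0 again, from the cache
  return 0;
}
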
@@ -441,6 +473,12 @@ void MachineLICM::HoistRegionPostRA() {
   const std::vector<MachineBasicBlock*> Blocks = CurLoop->getBlocks();
   for (unsigned i = 0, e = Blocks.size(); i != e; ++i) {
     MachineBasicBlock *BB = Blocks[i];
+
+    // If the header of the loop containing this basic block is a landing pad,
+    // then don't try to hoist instructions out of this loop.
+    const MachineLoop *ML = MLI->getLoopFor(BB);
+    if (ML && ML->getHeader()->isLandingPad()) continue;
+
     // Conservatively treat live-in's as an external def.
     // FIXME: That means a reload that're reused in successor block(s) will not
     // be LICM'ed.
@@ -452,6 +490,7 @@ void MachineLICM::HoistRegionPostRA() {
         ++PhysRegDefs[*AS];
     }
 
+    SpeculationState = SpeculateUnknown;
     for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
          MII != E; ++MII) {
       MachineInstr *MI = &*MII;
@@ -545,6 +584,27 @@ void MachineLICM::HoistPostRA(MachineInstr *MI, unsigned Def) {
   Changed = true;
 }
 
+// IsGuaranteedToExecute - Check if this mbb is guaranteed to execute.
+// If not, then a load from this mbb may not be safe to hoist.
+bool MachineLICM::IsGuaranteedToExecute(MachineBasicBlock *BB) {
+  if (SpeculationState != SpeculateUnknown)
+    return SpeculationState == SpeculateFalse;
+
+  if (BB != CurLoop->getHeader()) {
+    // Check loop exiting blocks.
+    SmallVector<MachineBasicBlock*, 8> CurrentLoopExitingBlocks;
+    CurLoop->getExitingBlocks(CurrentLoopExitingBlocks);
+    for (unsigned i = 0, e = CurrentLoopExitingBlocks.size(); i != e; ++i)
+      if (!DT->dominates(BB, CurrentLoopExitingBlocks[i])) {
+        SpeculationState = SpeculateTrue;
+        return false;
+      }
+  }
+
+  SpeculationState = SpeculateFalse;
+  return true;
+}
+
 /// HoistRegion - Walk the specified region of the CFG (defined by all blocks
 /// dominated by the specified block, and that are in the current loop) in depth
 /// first order w.r.t the DominatorTree. This allows us to visit definitions
@@ -554,6 +614,11 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
   assert(N != 0 && "Null dominator tree node?");
   MachineBasicBlock *BB = N->getBlock();
 
+  // If the header of the loop containing this basic block is a landing pad,
+  // then don't try to hoist instructions out of this loop.
+  const MachineLoop *ML = MLI->getLoopFor(BB);
+  if (ML && ML->getHeader()->isLandingPad()) return;
+
   // If this subregion is not in the top level loop at all, exit.
   if (!CurLoop->contains(BB))
     return;
@@ -571,6 +636,7 @@ void MachineLICM::HoistRegion(MachineDomTreeNode *N, bool IsHeader) {
   // Remember livein register pressure.
   BackTrace.push_back(RegPressure);
 
+  SpeculationState = SpeculateUnknown;
   for (MachineBasicBlock::iterator MII = BB->begin(), E = BB->end();
        MII != E; ) {
     MachineBasicBlock::iterator NextMII = MII; ++NextMII;
@@ -596,6 +662,23 @@ static bool isOperandKill(const MachineOperand &MO, MachineRegisterInfo *MRI) {
   return MO.isKill() || MRI->hasOneNonDBGUse(MO.getReg());
 }
 
+/// getRegisterClassIDAndCost - For a given MI, register, and the operand
+/// index, return the ID and cost of its representative register class.
+void
+MachineLICM::getRegisterClassIDAndCost(const MachineInstr *MI,
+                                       unsigned Reg, unsigned OpIdx,
+                                       unsigned &RCId, unsigned &RCCost) const {
+  const TargetRegisterClass *RC = MRI->getRegClass(Reg);
+  EVT VT = *RC->vt_begin();
+  if (VT == MVT::untyped) {
+    RCId = RC->getID();
+    RCCost = 1;
+  } else {
+    RCId = TLI->getRepRegClassFor(VT)->getID();
+    RCCost = TLI->getRepRegClassCostFor(VT);
+  }
+}
+
 /// InitRegPressure - Find all virtual register references that are liveout of
 /// the preheader to initialize the starting "register pressure". Note this
 /// does not count live through (livein but not used) registers.
@@ -625,18 +708,17 @@ void MachineLICM::InitRegPressure(MachineBasicBlock *BB) {
       continue;
 
     bool isNew = RegSeen.insert(Reg);
-    const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-    EVT VT = *RC->vt_begin();
-    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
+    unsigned RCId, RCCost;
+    getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
     if (MO.isDef())
-      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+      RegPressure[RCId] += RCCost;
     else {
       bool isKill = isOperandKill(MO, MRI);
       if (isNew && !isKill)
         // Haven't seen this, it must be a livein.
-        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
+        RegPressure[RCId] += RCCost;
       else if (!isNew && isKill)
-        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
+        RegPressure[RCId] -= RCCost;
     }
   }
 }
@@ -661,11 +743,8 @@ void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
     if (MO.isDef())
       Defs.push_back(Reg);
     else if (!isNew && isOperandKill(MO, MRI)) {
-      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-      EVT VT = *RC->vt_begin();
-      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-      unsigned RCCost = TLI->getRepRegClassCostFor(VT);
-
+      unsigned RCId, RCCost;
+      getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
       if (RCCost > RegPressure[RCId])
         RegPressure[RCId] = 0;
       else
@@ -673,14 +752,29 @@ void MachineLICM::UpdateRegPressure(const MachineInstr *MI) {
     }
   }
 
+  unsigned Idx = 0;
   while (!Defs.empty()) {
     unsigned Reg = Defs.pop_back_val();
-    const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-    EVT VT = *RC->vt_begin();
-    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-    unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+    unsigned RCId, RCCost;
+    getRegisterClassIDAndCost(MI, Reg, Idx, RCId, RCCost);
     RegPressure[RCId] += RCCost;
+    ++Idx;
+  }
+}
+
+/// isLoadFromGOTOrConstantPool - Return true if this machine instruction
+/// loads from the global offset table or constant pool.
+static bool isLoadFromGOTOrConstantPool(MachineInstr &MI) {
+  assert(MI.getDesc().mayLoad() && "Expected MI that loads!");
+  for (MachineInstr::mmo_iterator I = MI.memoperands_begin(),
+         E = MI.memoperands_end(); I != E; ++I) {
+    if (const Value *V = (*I)->getValue()) {
+      if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V))
+        if (PSV == PSV->getGOT() || PSV == PSV->getConstantPool())
+          return true;
+    }
   }
+  return false;
 }
 
 /// IsLICMCandidate - Returns true if the instruction may be a suitable
@@ -691,7 +785,17 @@ bool MachineLICM::IsLICMCandidate(MachineInstr &I) {
   bool DontMoveAcrossStore = true;
   if (!I.isSafeToMove(TII, AA, DontMoveAcrossStore))
     return false;
-  
+
+  // If it is a load then check if it is guaranteed to execute by making sure
+  // that it dominates all exiting blocks. If it doesn't, then there is a path
+  // out of the loop which does not execute this load, so we can't hoist it.
+  // Loads from constant memory are not safe to speculate all the time, for
+  // example an indexed load from a jump table.
+  // Stores and side effects are already checked by isSafeToMove.
+  if (I.getDesc().mayLoad() && !isLoadFromGOTOrConstantPool(I) &&
+      !IsGuaranteedToExecute(I.getParent()))
+    return false;
+
   return true;
 }
 
@@ -879,10 +983,8 @@ void MachineLICM::UpdateBackTraceRegPressure(const MachineInstr *MI) {
     if (!TargetRegisterInfo::isVirtualRegister(Reg))
       continue;
 
-    const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-    EVT VT = *RC->vt_begin();
-    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-    unsigned RCCost = TLI->getRepRegClassCostFor(VT);
+    unsigned RCId, RCCost;
+    getRegisterClassIDAndCost(MI, Reg, i, RCId, RCCost);
     if (MO.isDef()) {
       DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
       if (CI != Cost.end())
@@ -941,16 +1043,15 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
     unsigned Reg = MO.getReg();
     if (!TargetRegisterInfo::isVirtualRegister(Reg))
       continue;
+
+    unsigned RCId, RCCost;
+    getRegisterClassIDAndCost(&MI, Reg, i, RCId, RCCost);
     if (MO.isDef()) {
       if (HasHighOperandLatency(MI, i, Reg)) {
         ++NumHighLatency;
         return true;
       }
 
-      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-      EVT VT = *RC->vt_begin();
-      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-      unsigned RCCost = TLI->getRepRegClassCostFor(VT);
       DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
       if (CI != Cost.end())
         CI->second += RCCost;
@@ -960,10 +1061,6 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
       // Is a virtual register use is a kill, hoisting it out of the loop
       // may actually reduce register pressure or be register pressure
      // neutral.
-      const TargetRegisterClass *RC = MRI->getRegClass(Reg);
-      EVT VT = *RC->vt_begin();
-      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
-      unsigned RCCost = TLI->getRepRegClassCostFor(VT);
       DenseMap<unsigned, int>::iterator CI = Cost.find(RCId);
       if (CI != Cost.end())
         CI->second -= RCCost;
@@ -979,6 +1076,13 @@ bool MachineLICM::IsProfitableToHoist(MachineInstr &MI) {
       return true;
   }
 
+  // Do not "speculate" in a high register pressure situation. If an
+  // instruction is not guaranteed to be executed in the loop, it's best to be
+  // conservative.
+  if (AvoidSpeculation &&
+      (!IsGuaranteedToExecute(MI.getParent()) && !MayCSE(&MI)))
+    return false;
+
   // High register pressure situation, only hoist if the instruction is going to
   // be remat'ed.
   if (!TII->isTriviallyReMaterializable(&MI, AA) &&
@@ -1018,9 +1122,9 @@ MachineInstr *MachineLICM::ExtractHoistableLoad(MachineInstr *MI) {
                                     /*UnfoldStore=*/false,
                                     &LoadRegIndex);
   if (NewOpc == 0) return 0;
-  const TargetInstrDesc &TID = TII->get(NewOpc);
-  if (TID.getNumDefs() != 1) return 0;
-  const TargetRegisterClass *RC = TII->getRegClass(TID, LoadRegIndex, TRI);
+  const MCInstrDesc &MID = TII->get(NewOpc);
+  if (MID.getNumDefs() != 1) return 0;
+  const TargetRegisterClass *RC = TII->getRegClass(MID, LoadRegIndex, TRI);
   // Ok, we're unfolding. Create a temporary register and do the unfold.
   unsigned Reg = MRI->createVirtualRegister(RC);
 
@@ -1094,6 +1198,7 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
 
     // Replace virtual registers defined by MI by their counterparts defined
     // by Dup.
+    SmallVector<unsigned, 2> Defs;
     for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
       const MachineOperand &MO = MI->getOperand(i);
 
@@ -1104,11 +1209,33 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
              "Instructions with different phys regs are not identical!");
 
       if (MO.isReg() && MO.isDef() &&
-          !TargetRegisterInfo::isPhysicalRegister(MO.getReg())) {
-        MRI->replaceRegWith(MO.getReg(), Dup->getOperand(i).getReg());
-        MRI->clearKillFlags(Dup->getOperand(i).getReg());
+          !TargetRegisterInfo::isPhysicalRegister(MO.getReg()))
+        Defs.push_back(i);
+    }
+
+    SmallVector<const TargetRegisterClass*, 2> OrigRCs;
+    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+      unsigned Idx = Defs[i];
+      unsigned Reg = MI->getOperand(Idx).getReg();
+      unsigned DupReg = Dup->getOperand(Idx).getReg();
+      OrigRCs.push_back(MRI->getRegClass(DupReg));
+
+      if (!MRI->constrainRegClass(DupReg, MRI->getRegClass(Reg))) {
+        // Restore the old register classes of any defs already constrained.
+        for (unsigned j = 0; j != i; ++j)
+          MRI->setRegClass(Dup->getOperand(Defs[j]).getReg(), OrigRCs[j]);
+        return false;
       }
     }
+
+    for (unsigned i = 0, e = Defs.size(); i != e; ++i) {
+      unsigned Idx = Defs[i];
+      unsigned Reg = MI->getOperand(Idx).getReg();
+      unsigned DupReg = Dup->getOperand(Idx).getReg();
+      MRI->replaceRegWith(Reg, DupReg);
+      MRI->clearKillFlags(DupReg);
+    }
+
     MI->eraseFromParent();
     ++NumCSEed;
     return true;
@@ -1116,6 +1243,20 @@ bool MachineLICM::EliminateCSE(MachineInstr *MI,
   return false;
 }
 
+/// MayCSE - Return true if the given instruction will be CSE'd if it's
+/// hoisted out of the loop.
+bool MachineLICM::MayCSE(MachineInstr *MI) {
+  unsigned Opcode = MI->getOpcode();
+  DenseMap<unsigned, std::vector<const MachineInstr*> >::iterator
+    CI = CSEMap.find(Opcode);
+  // Do not CSE implicit_def so ProcessImplicitDefs can properly propagate
+  // the undef property onto uses.
+  if (CI == CSEMap.end() || MI->isImplicitDef())
+    return false;
+
+  return LookForDuplicate(MI, CI->second) != 0;
+}
+
 /// Hoist - When an instruction is found to use only loop invariant operands
 /// that are safe to hoist, this instruction is called to do the dirty work.
 ///
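
The reworked EliminateCSE above no longer rewrites each def as it scans the operands: it first collects the def operand indices, then tries to constrain the duplicate's register class for every def, restoring the already-narrowed classes and bailing out if any constrainRegClass call fails, and only then performs the replaceRegWith/clearKillFlags rewrites. The standalone sketch below only illustrates that try-then-commit-with-rollback shape; the integer "register classes" and the constrain helper are hypothetical stand-ins, not LLVM API.

// Illustrative sketch, not part of the patch: constrain all defs or roll back.
#include <cassert>
#include <vector>

// Stand-in for MRI->constrainRegClass: narrows the "class" when it can,
// otherwise fails and leaves the value untouched (as the real call does).
static bool constrain(int &RC) {
  if (RC % 4 != 0)
    return false;
  RC /= 2;
  return true;
}

// Mirrors the middle loop of the new EliminateCSE: remember each original
// class, try to constrain it, and on the first failure restore everything
// constrained so far (the OrigRCs loop) and report failure. Only when this
// returns true would a caller go on to do the irreversible rewrites.
static bool constrainAllOrRollback(std::vector<int> &RCs) {
  std::vector<int> OrigRCs;
  for (unsigned i = 0, e = RCs.size(); i != e; ++i) {
    OrigRCs.push_back(RCs[i]);
    if (!constrain(RCs[i])) {
      for (unsigned j = 0; j != i; ++j)
        RCs[j] = OrigRCs[j];
      return false;
    }
  }
  return true;
}

int main() {
  std::vector<int> RCs = {4, 8, 2}; // the last "class" cannot be constrained
  bool OK = constrainAllOrRollback(RCs);
  // The failure left every element exactly as it was.
  assert(!OK && RCs[0] == 4 && RCs[1] == 8 && RCs[2] == 2);
  (void)OK;
  return 0;
}

Keeping the irreversible replaceRegWith calls until after every constraint has succeeded is what makes the early "return false" safe.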