X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FScheduleDAGInstrs.cpp;h=65075893e5ab43e9e68871060d9fb296099c6ab7;hb=3e2346341cc7407bb895ddb13e43d512713e8a35;hp=428c1a40320e5a0fd40814b8ae2de6bc6bffebe3;hpb=bfb8223e2b2a55c3ac6c73be0ac99bbce17cb097;p=oota-llvm.git diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index 428c1a40320..65075893e5a 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -21,6 +21,7 @@ #include "llvm/Analysis/ValueTracking.h" #include "llvm/CodeGen/LiveIntervalAnalysis.h" #include "llvm/CodeGen/MachineFunctionPass.h" +#include "llvm/CodeGen/MachineInstrBuilder.h" #include "llvm/CodeGen/MachineMemOperand.h" #include "llvm/CodeGen/MachineRegisterInfo.h" #include "llvm/CodeGen/PseudoSourceValue.h" @@ -36,6 +37,8 @@ #include "llvm/Target/TargetMachine.h" #include "llvm/Target/TargetRegisterInfo.h" #include "llvm/Target/TargetSubtargetInfo.h" +#include + using namespace llvm; static cl::opt EnableAASchedMI("enable-aa-sched-mi", cl::Hidden, @@ -46,9 +49,11 @@ ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf, const MachineLoopInfo &mli, const MachineDominatorTree &mdt, bool IsPostRAFlag, + bool RemoveKillFlags, LiveIntervals *lis) : ScheduleDAG(mf), MLI(mli), MDT(mdt), MFI(mf.getFrameInfo()), LIS(lis), - IsPostRA(IsPostRAFlag), CanHandleTerminators(false), FirstDbgValue(0) { + IsPostRA(IsPostRAFlag), RemoveKillFlags(RemoveKillFlags), + CanHandleTerminators(false), FirstDbgValue(0) { assert((IsPostRA || LIS) && "PreRA scheduling requires LiveIntervals"); DbgValues.clear(); assert(!(IsPostRA && MRI.getNumVirtRegs()) && @@ -98,7 +103,7 @@ static void getUnderlyingObjects(const Value *V, SmallVector Objs; GetUnderlyingObjects(const_cast(V), Objs); - for (SmallVector::iterator I = Objs.begin(), IE = Objs.end(); + for (SmallVectorImpl::iterator I = Objs.begin(), IE = Objs.end(); I != IE; ++I) { V = *I; if (!Visited.insert(V)) @@ -116,12 +121,15 @@ static void getUnderlyingObjects(const Value *V, } while (!Working.empty()); } +typedef SmallVector, 4> +UnderlyingObjectsVector; + /// getUnderlyingObjectsForInstr - If this machine instr has memory reference /// information and it can be tracked to a normal reference to a known /// object, return the Value for that object. 
static void getUnderlyingObjectsForInstr(const MachineInstr *MI, - const MachineFrameInfo *MFI, - SmallVectorImpl > &Objects) { + const MachineFrameInfo *MFI, + UnderlyingObjectsVector &Objects) { if (!MI->hasOneMemOperand() || !(*MI->memoperands_begin())->getValue() || (*MI->memoperands_begin())->isVolatile()) @@ -134,8 +142,8 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI, SmallVector Objs; getUnderlyingObjects(V, Objs); - for (SmallVector::iterator I = Objs.begin(), IE = Objs.end(); - I != IE; ++I) { + for (SmallVectorImpl::iterator I = Objs.begin(), IE = Objs.end(); + I != IE; ++I) { bool MayAlias = true; V = *I; @@ -155,7 +163,7 @@ static void getUnderlyingObjectsForInstr(const MachineInstr *MI, return; } - Objects.push_back(std::make_pair(V, MayAlias)); + Objects.push_back(UnderlyingObjectsVector::value_type(V, MayAlias)); } } @@ -175,14 +183,11 @@ void ScheduleDAGInstrs::finishBlock() { void ScheduleDAGInstrs::enterRegion(MachineBasicBlock *bb, MachineBasicBlock::iterator begin, MachineBasicBlock::iterator end, - unsigned endcount) { + unsigned regioninstrs) { assert(bb == BB && "startBlock should set BB"); RegionBegin = begin; RegionEnd = end; - EndIndex = endcount; - MISUnitMap.clear(); - - ScheduleDAG::clearDAG(); + NumRegionInstrs = regioninstrs; } /// Close the current scheduling region. Don't clear any state in case the @@ -262,15 +267,15 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { if (UseOp < 0) Dep = SDep(SU, SDep::Artificial); else { + // Set the hasPhysRegDefs only for physreg defs that have a use within + // the scheduling region. + SU->hasPhysRegDefs = true; Dep = SDep(SU, SDep::Data, *Alias); RegUse = UseSU->getInstr(); - Dep.setMinLatency( - SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, - RegUse, UseOp, /*FindMin=*/true)); } Dep.setLatency( - SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, - RegUse, UseOp, /*FindMin=*/false)); + SchedModel.computeOperandLatency(SU->getInstr(), OperIdx, RegUse, + UseOp)); ST.adjustSchedDependency(SU, UseSU, Dep); UseSU->addPred(Dep); @@ -282,8 +287,8 @@ void ScheduleDAGInstrs::addPhysRegDataDeps(SUnit *SU, unsigned OperIdx) { /// this SUnit to following instructions in the same scheduling region that /// depend the physical register referenced at OperIdx. void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { - const MachineInstr *MI = SU->getInstr(); - const MachineOperand &MO = MI->getOperand(OperIdx); + MachineInstr *MI = SU->getInstr(); + MachineOperand &MO = MI->getOperand(OperIdx); // Optionally add output and anti dependencies. For anti // dependencies we use a latency of 0 because for a multi-issue @@ -307,10 +312,8 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { DefSU->addPred(SDep(SU, Kind, /*Reg=*/*Alias)); else { SDep Dep(SU, Kind, /*Reg=*/*Alias); - unsigned OutLatency = - SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()); - Dep.setMinLatency(OutLatency); - Dep.setLatency(OutLatency); + Dep.setLatency( + SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr())); DefSU->addPred(Dep); } } @@ -318,10 +321,13 @@ void ScheduleDAGInstrs::addPhysRegDeps(SUnit *SU, unsigned OperIdx) { } if (!MO.isDef()) { + SU->hasPhysRegUses = true; // Either insert a new Reg2SUnits entry with an empty SUnits list, or // retrieve the existing SUnits list for this register's uses. // Push this SUnit on the use list. 
Uses.insert(PhysRegSUOper(SU, OperIdx, MO.getReg())); + if (RemoveKillFlags) + MO.setIsKill(false); } else { addPhysRegDataDeps(SU, OperIdx); @@ -385,10 +391,8 @@ void ScheduleDAGInstrs::addVRegDefDeps(SUnit *SU, unsigned OperIdx) { SUnit *DefSU = DefI->SU; if (DefSU != SU && DefSU != &ExitSU) { SDep Dep(SU, SDep::Output, Reg); - unsigned OutLatency = - SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr()); - Dep.setMinLatency(OutLatency); - Dep.setLatency(OutLatency); + Dep.setLatency( + SchedModel.computeOutputLatency(MI, OperIdx, DefSU->getInstr())); DefSU->addPred(Dep); } DefI->SU = SU; @@ -405,9 +409,19 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) { MachineInstr *MI = SU->getInstr(); unsigned Reg = MI->getOperand(OperIdx).getReg(); + // Record this local VReg use. + VReg2UseMap::iterator UI = VRegUses.find(Reg); + for (; UI != VRegUses.end(); ++UI) { + if (UI->SU == SU) + break; + } + if (UI == VRegUses.end()) + VRegUses.insert(VReg2SUnit(Reg, SU)); + // Lookup this operand's reaching definition. assert(LIS && "vreg dependencies requires LiveIntervals"); - LiveRangeQuery LRQ(LIS->getInterval(Reg), LIS->getInstructionIndex(MI)); + LiveQueryResult LRQ + = LIS->getInterval(Reg).Query(LIS->getInstructionIndex(MI)); VNInfo *VNI = LRQ.valueIn(); // VNI will be valid because MachineOperand::readsReg() is checked by caller. @@ -423,10 +437,7 @@ void ScheduleDAGInstrs::addVRegUseDeps(SUnit *SU, unsigned OperIdx) { // Adjust the dependence latency using operand def/use information, then // allow the target to perform its own adjustments. int DefOp = Def->findRegisterDefOperandIdx(Reg); - dep.setLatency( - SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx, false)); - dep.setMinLatency( - SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx, true)); + dep.setLatency(SchedModel.computeOperandLatency(Def, DefOp, MI, OperIdx)); const TargetSubtargetInfo &ST = TM.getSubtarget(); ST.adjustSchedDependency(DefSU, SU, const_cast(dep)); @@ -468,8 +479,8 @@ static inline bool isUnsafeMemoryObject(MachineInstr *MI, SmallVector Objs; getUnderlyingObjects(V, Objs); - for (SmallVector::iterator I = Objs.begin(), - IE = Objs.end(); I != IE; ++I) { + for (SmallVectorImpl::iterator I = Objs.begin(), + IE = Objs.end(); I != IE; ++I) { V = *I; if (const PseudoSourceValue *PSV = dyn_cast(V)) { @@ -501,6 +512,10 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI, if (MIa == MIb) return false; + // FIXME: Need to handle multiple memory operands to support all targets. + if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand()) + return true; + if (isUnsafeMemoryObject(MIa, MFI) || isUnsafeMemoryObject(MIb, MFI)) return true; @@ -516,10 +531,6 @@ static bool MIsNeedChainEdge(AliasAnalysis *AA, const MachineFrameInfo *MFI, MachineMemOperand *MMOa = *MIa->memoperands_begin(); MachineMemOperand *MMOb = *MIb->memoperands_begin(); - // FIXME: Need to handle multiple memory operands to support all targets. - if (!MIa->hasOneMemOperand() || !MIb->hasOneMemOperand()) - llvm_unreachable("Multiple memory operands."); - // The following interface to AA is fashioned after DAGCombiner::isAlias // and operates with MachineMemOperand offset with some important // assumptions: @@ -638,8 +649,7 @@ void addChainDependency (AliasAnalysis *AA, const MachineFrameInfo *MFI, bool isNormalMemory = false) { // If this is a false dependency, // do not add the edge, but rememeber the rejected node. 
- if (!EnableAASchedMI || - MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) { + if (!AA || MIsNeedChainEdge(AA, MFI, SUa->getInstr(), SUb->getInstr())) { SDep Dep(SUa, isNormalMemory ? SDep::MayAliasMem : SDep::Barrier); Dep.setLatency(TrueMemOrderLatency); SUb->addPred(Dep); @@ -667,7 +677,7 @@ void addChainDependency (AliasAnalysis *AA, const MachineFrameInfo *MFI, void ScheduleDAGInstrs::initSUnits() { // We'll be allocating one SUnit for each real instruction in the region, // which is contained within a basic block. - SUnits.reserve(BB->size()); + SUnits.reserve(NumRegionInstrs); for (MachineBasicBlock::iterator I = RegionBegin; I != RegionEnd; ++I) { MachineInstr *MI = I; @@ -682,17 +692,51 @@ void ScheduleDAGInstrs::initSUnits() { // Assign the Latency field of SU using target-provided information. SU->Latency = SchedModel.computeInstrLatency(SU->getInstr()); + + // If this SUnit uses an unbuffered resource, mark it as such. + // These resources are used for in-order execution pipelines within an + // out-of-order core and are identified by BufferSize=1. BufferSize=0 is + // used for dispatch/issue groups and is not considered here. + if (SchedModel.hasInstrSchedModel()) { + const MCSchedClassDesc *SC = getSchedClass(SU); + for (TargetSchedModel::ProcResIter + PI = SchedModel.getWriteProcResBegin(SC), + PE = SchedModel.getWriteProcResEnd(SC); PI != PE; ++PI) { + switch (SchedModel.getProcResource(PI->ProcResourceIdx)->BufferSize) { + case 0: + SU->hasReservedResource = true; + break; + case 1: + SU->isUnbuffered = true; + break; + default: + break; + } + } + } } } -/// If RegPressure is non null, compute register pressure as a side effect. The +/// If RegPressure is non-null, compute register pressure as a side effect. The /// DAG builder is an efficient place to do it because it already visits /// operands. void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, - RegPressureTracker *RPTracker) { + RegPressureTracker *RPTracker, + PressureDiffs *PDiffs) { + const TargetSubtargetInfo &ST = TM.getSubtarget(); + bool UseAA = EnableAASchedMI.getNumOccurrences() > 0 ? EnableAASchedMI + : ST.useAA(); + AliasAnalysis *AAForDep = UseAA ? AA : 0; + + MISUnitMap.clear(); + ScheduleDAG::clearDAG(); + // Create an SUnit for each real instruction. initSUnits(); + if (PDiffs) + PDiffs->init(SUnits.size()); + // We build scheduling units by walking a block's instruction list from bottom // to top. @@ -718,10 +762,9 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, Uses.setUniverse(TRI->getNumRegs()); assert(VRegDefs.empty() && "Only BuildSchedGraph may access VRegDefs"); - // FIXME: Allow SparseSet to reserve space for the creation of virtual - // registers during scheduling. Don't artificially inflate the Universe - // because we want to assert that vregs are not created during DAG building. + VRegUses.clear(); VRegDefs.setUniverse(MRI.getNumVirtRegs()); + VRegUses.setUniverse(MRI.getNumVirtRegs()); // Model data dependencies between instructions being scheduled and the // ExitSU. @@ -741,17 +784,18 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, DbgMI = MI; continue; } + SUnit *SU = MISUnitMap[MI]; + assert(SU && "No SUnit mapped to this MI"); + if (RPTracker) { - RPTracker->recede(); + PressureDiff *PDiff = PDiffs ? 
&(*PDiffs)[SU->NodeNum] : 0; + RPTracker->recede(/*LiveUses=*/0, PDiff); assert(RPTracker->getPos() == prior(MII) && "RPTracker can't find MI"); } - assert((!MI->isTerminator() || CanHandleTerminators) && !MI->isLabel() && + assert((CanHandleTerminators || (!MI->isTerminator() && !MI->isLabel())) && "Cannot schedule terminators or labels!"); - SUnit *SU = MISUnitMap[MI]; - assert(SU && "No SUnit mapped to this MI"); - // Add register-based dependencies (data, anti, and output). bool HasVRegDef = false; for (unsigned j = 0, n = MI->getNumOperands(); j != n; ++j) { @@ -829,20 +873,20 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, unsigned ChainLatency = 0; if (AliasChain->getInstr()->mayLoad()) ChainLatency = TrueMemOrderLatency; - addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes, + addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes, ChainLatency); } AliasChain = SU; for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k) - addChainDependency(AA, MFI, SU, PendingLoads[k], RejectMemNodes, + addChainDependency(AAForDep, MFI, SU, PendingLoads[k], RejectMemNodes, TrueMemOrderLatency); for (MapVector::iterator I = AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I) - addChainDependency(AA, MFI, SU, I->second, RejectMemNodes); + addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes); for (MapVector >::iterator I = AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) { for (unsigned i = 0, e = I->second.size(); i != e; ++i) - addChainDependency(AA, MFI, SU, I->second[i], RejectMemNodes, + addChainDependency(AAForDep, MFI, SU, I->second[i], RejectMemNodes, TrueMemOrderLatency); } adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes, @@ -851,7 +895,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, AliasMemDefs.clear(); AliasMemUses.clear(); } else if (MI->mayStore()) { - SmallVector, 4> Objs; + UnderlyingObjectsVector Objs; getUnderlyingObjectsForInstr(MI, MFI, Objs); if (Objs.empty()) { @@ -860,10 +904,10 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, } bool MayAlias = false; - for (SmallVector, 4>::iterator - K = Objs.begin(), KE = Objs.end(); K != KE; ++K) { - const Value *V = K->first; - bool ThisMayAlias = K->second; + for (UnderlyingObjectsVector::iterator K = Objs.begin(), KE = Objs.end(); + K != KE; ++K) { + const Value *V = K->getPointer(); + bool ThisMayAlias = K->getInt(); if (ThisMayAlias) MayAlias = true; @@ -875,7 +919,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, MapVector::iterator IE = ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end()); if (I != IE) { - addChainDependency(AA, MFI, SU, I->second, RejectMemNodes, 0, true); + addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes, + 0, true); I->second = SU; } else { if (ThisMayAlias) @@ -890,7 +935,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, ((ThisMayAlias) ? AliasMemUses.end() : NonAliasMemUses.end()); if (J != JE) { for (unsigned i = 0, e = J->second.size(); i != e; ++i) - addChainDependency(AA, MFI, SU, J->second[i], RejectMemNodes, + addChainDependency(AAForDep, MFI, SU, J->second[i], RejectMemNodes, TrueMemOrderLatency, true); J->second.clear(); } @@ -899,11 +944,11 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, // Add dependencies from all the PendingLoads, i.e. loads // with no underlying object. 
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k) - addChainDependency(AA, MFI, SU, PendingLoads[k], RejectMemNodes, + addChainDependency(AAForDep, MFI, SU, PendingLoads[k], RejectMemNodes, TrueMemOrderLatency); // Add dependence on alias chain, if needed. if (AliasChain) - addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes); + addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes); // But we also should check dependent instructions for the // SU in question. adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes, @@ -925,7 +970,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, if (MI->isInvariantLoad(AA)) { // Invariant load, no chain dependencies needed! } else { - SmallVector, 4> Objs; + UnderlyingObjectsVector Objs; getUnderlyingObjectsForInstr(MI, MFI, Objs); if (Objs.empty()) { @@ -933,7 +978,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, // potentially aliasing stores. for (MapVector::iterator I = AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I) - addChainDependency(AA, MFI, SU, I->second, RejectMemNodes); + addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes); PendingLoads.push_back(SU); MayAlias = true; @@ -941,10 +986,10 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, MayAlias = false; } - for (SmallVector, 4>::iterator + for (UnderlyingObjectsVector::iterator J = Objs.begin(), JE = Objs.end(); J != JE; ++J) { - const Value *V = J->first; - bool ThisMayAlias = J->second; + const Value *V = J->getPointer(); + bool ThisMayAlias = J->getInt(); if (ThisMayAlias) MayAlias = true; @@ -955,7 +1000,8 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, MapVector::iterator IE = ((ThisMayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end()); if (I != IE) - addChainDependency(AA, MFI, SU, I->second, RejectMemNodes, 0, true); + addChainDependency(AAForDep, MFI, SU, I->second, RejectMemNodes, + 0, true); if (ThisMayAlias) AliasMemUses[V].push_back(SU); else @@ -965,7 +1011,7 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, adjustChainDeps(AA, MFI, SU, &ExitSU, RejectMemNodes, /*Latency=*/0); // Add dependencies on alias and barrier chains, if needed. if (MayAlias && AliasChain) - addChainDependency(AA, MFI, SU, AliasChain, RejectMemNodes); + addChainDependency(AAForDep, MFI, SU, AliasChain, RejectMemNodes); if (BarrierChain) BarrierChain->addPred(SDep(SU, SDep::Barrier)); } @@ -980,6 +1026,145 @@ void ScheduleDAGInstrs::buildSchedGraph(AliasAnalysis *AA, PendingLoads.clear(); } +/// \brief Initialize register live-range state for updating kills. +void ScheduleDAGInstrs::startBlockForKills(MachineBasicBlock *BB) { + // Start with no live registers. + LiveRegs.reset(); + + // Examine the live-in regs of all successors. + for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(), + SE = BB->succ_end(); SI != SE; ++SI) { + for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(), + E = (*SI)->livein_end(); I != E; ++I) { + unsigned Reg = *I; + // Repeat, for reg and all subregs. + for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true); + SubRegs.isValid(); ++SubRegs) + LiveRegs.set(*SubRegs); + } + } +} + +bool ScheduleDAGInstrs::toggleKillFlag(MachineInstr *MI, MachineOperand &MO) { + // Setting kill flag... + if (!MO.isKill()) { + MO.setIsKill(true); + return false; + } + + // If MO itself is live, clear the kill flag... 
+  if (LiveRegs.test(MO.getReg())) {
+    MO.setIsKill(false);
+    return false;
+  }
+
+  // If any subreg of MO is live, then create an imp-def for that
+  // subreg and keep MO marked as killed.
+  MO.setIsKill(false);
+  bool AllDead = true;
+  const unsigned SuperReg = MO.getReg();
+  MachineInstrBuilder MIB(MF, MI);
+  for (MCSubRegIterator SubRegs(SuperReg, TRI); SubRegs.isValid(); ++SubRegs) {
+    if (LiveRegs.test(*SubRegs)) {
+      MIB.addReg(*SubRegs, RegState::ImplicitDefine);
+      AllDead = false;
+    }
+  }
+
+  if(AllDead)
+    MO.setIsKill(true);
+  return false;
+}
+
+// FIXME: Reuse the LivePhysRegs utility for this.
+void ScheduleDAGInstrs::fixupKills(MachineBasicBlock *MBB) {
+  DEBUG(dbgs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
+
+  LiveRegs.resize(TRI->getNumRegs());
+  BitVector killedRegs(TRI->getNumRegs());
+
+  startBlockForKills(MBB);
+
+  // Examine block from end to start...
+  unsigned Count = MBB->size();
+  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
+       I != E; --Count) {
+    MachineInstr *MI = --I;
+    if (MI->isDebugValue())
+      continue;
+
+    // Update liveness.  Registers that are defed but not used in this
+    // instruction are now dead. Mark register and all subregs as they
+    // are completely defined.
+    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI->getOperand(i);
+      if (MO.isRegMask())
+        LiveRegs.clearBitsNotInMask(MO.getRegMask());
+      if (!MO.isReg()) continue;
+      unsigned Reg = MO.getReg();
+      if (Reg == 0) continue;
+      if (!MO.isDef()) continue;
+      // Ignore two-addr defs.
+      if (MI->isRegTiedToUseOperand(i)) continue;
+
+      // Repeat for reg and all subregs.
+      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+           SubRegs.isValid(); ++SubRegs)
+        LiveRegs.reset(*SubRegs);
+    }
+
+    // Examine all used registers and set/clear kill flag. When a
+    // register is used multiple times we only set the kill flag on
+    // the first use. Don't set kill flags on undef operands.
+    killedRegs.reset();
+    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI->getOperand(i);
+      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
+      unsigned Reg = MO.getReg();
+      if ((Reg == 0) || MRI.isReserved(Reg)) continue;
+
+      bool kill = false;
+      if (!killedRegs.test(Reg)) {
+        kill = true;
+        // A register is not killed if any subregs are live...
+        for (MCSubRegIterator SubRegs(Reg, TRI); SubRegs.isValid(); ++SubRegs) {
+          if (LiveRegs.test(*SubRegs)) {
+            kill = false;
+            break;
+          }
+        }
+
+        // If subreg is not live, then register is killed if it became
+        // live in this instruction
+        if (kill)
+          kill = !LiveRegs.test(Reg);
+      }
+
+      if (MO.isKill() != kill) {
+        DEBUG(dbgs() << "Fixing " << MO << " in ");
+        // Warning: toggleKillFlag may invalidate MO.
+        toggleKillFlag(MI, MO);
+        DEBUG(MI->dump());
+      }
+
+      killedRegs.set(Reg);
+    }
+
+    // Mark any used register (that is not using undef) and subregs as
+    // now live...
+    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
+      MachineOperand &MO = MI->getOperand(i);
+      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
+      unsigned Reg = MO.getReg();
+      if ((Reg == 0) || MRI.isReserved(Reg)) continue;
+
+      for (MCSubRegIterator SubRegs(Reg, TRI, /*IncludeSelf=*/true);
+           SubRegs.isValid(); ++SubRegs)
+        LiveRegs.set(*SubRegs);
+    }
+  }
+}
+
 void ScheduleDAGInstrs::dumpNode(const SUnit *SU) const {
 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
   SU->getInstr()->dump();
 #endif
 }
@@ -994,7 +1179,7 @@ std::string ScheduleDAGInstrs::getGraphNodeLabel(const SUnit *SU) const {
   else if (SU == &ExitSU)
     oss << "<exit>";
   else
-    SU->getInstr()->print(oss);
+    SU->getInstr()->print(oss, &TM, /*SkipOpers=*/true);
   return oss.str();
 }
 
@@ -1018,31 +1203,39 @@ class SchedDFSImpl {
   /// List PredSU, SuccSU pairs that represent data edges between subtrees.
   std::vector<std::pair<const SUnit*, const SUnit*> > ConnectionPairs;
 
+  struct RootData {
+    unsigned NodeID;
+    unsigned ParentNodeID;  // Parent node (member of the parent subtree).
+    unsigned SubInstrCount; // Instr count in this tree only, not children.
+
+    RootData(unsigned id): NodeID(id),
+                           ParentNodeID(SchedDFSResult::InvalidSubtreeID),
+                           SubInstrCount(0) {}
+
+    unsigned getSparseSetIndex() const { return NodeID; }
+  };
+
+  SparseSet<RootData> RootSet;
+
 public:
-  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSData.size()) {}
+  SchedDFSImpl(SchedDFSResult &r): R(r), SubtreeClasses(R.DFSNodeData.size()) {
+    RootSet.setUniverse(R.DFSNodeData.size());
+  }
 
   /// Return true if this node been visited by the DFS traversal.
   ///
   /// During visitPostorderNode the Node's SubtreeID is assigned to the Node
   /// ID. Later, SubtreeID is updated but remains valid.
   bool isVisited(const SUnit *SU) const {
-    return R.DFSData[SU->NodeNum].SubtreeID != SchedDFSResult::InvalidSubtreeID;
+    return R.DFSNodeData[SU->NodeNum].SubtreeID
+      != SchedDFSResult::InvalidSubtreeID;
   }
 
   /// Initialize this node's instruction count. We don't need to flag the node
   /// visited until visitPostorder because the DAG cannot have cycles.
   void visitPreorder(const SUnit *SU) {
-    R.DFSData[SU->NodeNum].InstrCount = SU->getInstr()->isTransient() ? 0 : 1;
-    R.DFSData[SU->NodeNum].SubInstrCount = R.DFSData[SU->NodeNum].InstrCount;
-  }
-
-  /// Called once for each tree edge after calling visitPostOrderNode on the
-  /// predecessor. Increment the parent node's instruction count and
-  /// preemptively join this subtree to its parent's if it is small enough.
-  void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) {
-    R.DFSData[Succ->NodeNum].InstrCount
-      += R.DFSData[PredDep.getSUnit()->NodeNum].InstrCount;
-    joinPredSubtree(PredDep, Succ);
+    R.DFSNodeData[SU->NodeNum].InstrCount =
+      SU->getInstr()->isTransient() ? 0 : 1;
   }
 
   /// Called once for each node after all predecessors are visited. Revisit this
@@ -1051,22 +1244,50 @@ public:
   void visitPostorderNode(const SUnit *SU) {
     // Mark this node as the root of a subtree. It may be joined with its
     // successors later.
-    R.DFSData[SU->NodeNum].SubtreeID = SU->NodeNum;
+    R.DFSNodeData[SU->NodeNum].SubtreeID = SU->NodeNum;
+    RootData RData(SU->NodeNum);
+    RData.SubInstrCount = SU->getInstr()->isTransient() ? 0 : 1;
 
     // If any predecessors are still in their own subtree, they either cannot be
     // joined or are large enough to remain separate.
If this parent node's // total instruction count is not greater than a child subtree by at least // the subtree limit, then try to join it now since splitting subtrees is // only useful if multiple high-pressure paths are possible. - unsigned InstrCount = R.DFSData[SU->NodeNum].InstrCount; + unsigned InstrCount = R.DFSNodeData[SU->NodeNum].InstrCount; for (SUnit::const_pred_iterator PI = SU->Preds.begin(), PE = SU->Preds.end(); PI != PE; ++PI) { if (PI->getKind() != SDep::Data) continue; unsigned PredNum = PI->getSUnit()->NodeNum; - if ((InstrCount - R.DFSData[PredNum].InstrCount) < R.SubtreeLimit) + if ((InstrCount - R.DFSNodeData[PredNum].InstrCount) < R.SubtreeLimit) joinPredSubtree(*PI, SU, /*CheckLimit=*/false); + + // Either link or merge the TreeData entry from the child to the parent. + if (R.DFSNodeData[PredNum].SubtreeID == PredNum) { + // If the predecessor's parent is invalid, this is a tree edge and the + // current node is the parent. + if (RootSet[PredNum].ParentNodeID == SchedDFSResult::InvalidSubtreeID) + RootSet[PredNum].ParentNodeID = SU->NodeNum; + } + else if (RootSet.count(PredNum)) { + // The predecessor is not a root, but is still in the root set. This + // must be the new parent that it was just joined to. Note that + // RootSet[PredNum].ParentNodeID may either be invalid or may still be + // set to the original parent. + RData.SubInstrCount += RootSet[PredNum].SubInstrCount; + RootSet.erase(PredNum); + } } + RootSet[SU->NodeNum] = RData; + } + + /// Called once for each tree edge after calling visitPostOrderNode on the + /// predecessor. Increment the parent node's instruction count and + /// preemptively join this subtree to its parent's if it is small enough. + void visitPostorderEdge(const SDep &PredDep, const SUnit *Succ) { + R.DFSNodeData[Succ->NodeNum].InstrCount + += R.DFSNodeData[PredDep.getSUnit()->NodeNum].InstrCount; + joinPredSubtree(PredDep, Succ); } /// Add a connection for cross edges. @@ -1078,13 +1299,27 @@ public: /// between trees. void finalize() { SubtreeClasses.compress(); + R.DFSTreeData.resize(SubtreeClasses.getNumClasses()); + assert(SubtreeClasses.getNumClasses() == RootSet.size() + && "number of roots should match trees"); + for (SparseSet::const_iterator + RI = RootSet.begin(), RE = RootSet.end(); RI != RE; ++RI) { + unsigned TreeID = SubtreeClasses[RI->NodeID]; + if (RI->ParentNodeID != SchedDFSResult::InvalidSubtreeID) + R.DFSTreeData[TreeID].ParentTreeID = SubtreeClasses[RI->ParentNodeID]; + R.DFSTreeData[TreeID].SubInstrCount = RI->SubInstrCount; + // Note that SubInstrCount may be greater than InstrCount if we joined + // subtrees across a cross edge. InstrCount will be attributed to the + // original parent, while SubInstrCount will be attributed to the joined + // parent. + } R.SubtreeConnections.resize(SubtreeClasses.getNumClasses()); R.SubtreeConnectLevels.resize(SubtreeClasses.getNumClasses()); DEBUG(dbgs() << R.getNumSubtrees() << " subtrees:\n"); - for (unsigned Idx = 0, End = R.DFSData.size(); Idx != End; ++Idx) { - R.DFSData[Idx].SubtreeID = SubtreeClasses[Idx]; + for (unsigned Idx = 0, End = R.DFSNodeData.size(); Idx != End; ++Idx) { + R.DFSNodeData[Idx].SubtreeID = SubtreeClasses[Idx]; DEBUG(dbgs() << " SU(" << Idx << ") in tree " - << R.DFSData[Idx].SubtreeID << '\n'); + << R.DFSNodeData[Idx].SubtreeID << '\n'); } for (std::vector >::const_iterator I = ConnectionPairs.begin(), E = ConnectionPairs.end(); @@ -1109,7 +1344,7 @@ protected: // Check if the predecessor is already joined. 
const SUnit *PredSU = PredDep.getSUnit(); unsigned PredNum = PredSU->NodeNum; - if (R.DFSData[PredNum].SubtreeID != PredNum) + if (R.DFSNodeData[PredNum].SubtreeID != PredNum) return false; // Four is the magic number of successors before a node is considered a @@ -1122,11 +1357,9 @@ protected: return false; } } - if (CheckLimit && R.DFSData[PredNum].SubInstrCount > R.SubtreeLimit) + if (CheckLimit && R.DFSNodeData[PredNum].InstrCount > R.SubtreeLimit) return false; - - R.DFSData[PredNum].SubtreeID = Succ->NodeNum; - R.DFSData[Succ->NodeNum].SubInstrCount += R.DFSData[PredNum].SubInstrCount; + R.DFSNodeData[PredNum].SubtreeID = Succ->NodeNum; SubtreeClasses.join(Succ->NodeNum, PredNum); return true; } @@ -1136,16 +1369,19 @@ protected: if (!Depth) return; - SmallVectorImpl &Connections = - R.SubtreeConnections[FromTree]; - for (SmallVectorImpl::iterator - I = Connections.begin(), E = Connections.end(); I != E; ++I) { - if (I->TreeID == ToTree) { - I->Level = std::max(I->Level, Depth); - return; + do { + SmallVectorImpl &Connections = + R.SubtreeConnections[FromTree]; + for (SmallVectorImpl::iterator + I = Connections.begin(), E = Connections.end(); I != E; ++I) { + if (I->TreeID == ToTree) { + I->Level = std::max(I->Level, Depth); + return; + } } - } - Connections.push_back(SchedDFSResult::Connection(ToTree, Depth)); + Connections.push_back(SchedDFSResult::Connection(ToTree, Depth)); + FromTree = R.DFSTreeData[FromTree].ParentTreeID; + } while (FromTree != SchedDFSResult::InvalidSubtreeID); } }; } // namespace llvm @@ -1180,7 +1416,7 @@ public: static bool hasDataSucc(const SUnit *SU) { for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end(); SI != SE; ++SI) { - if (SI->getKind() == SDep::Data) + if (SI->getKind() == SDep::Data && !SI->getSUnit()->isBoundaryNode()) return true; } return false; @@ -1188,24 +1424,30 @@ static bool hasDataSucc(const SUnit *SU) { /// Compute an ILP metric for all nodes in the subDAG reachable via depth-first /// search from this root. -void SchedDFSResult::compute(ArrayRef Roots) { +void SchedDFSResult::compute(ArrayRef SUnits) { if (!IsBottomUp) llvm_unreachable("Top-down ILP metric is unimplemnted"); SchedDFSImpl Impl(*this); - for (ArrayRef::const_iterator - RootI = Roots.begin(), RootE = Roots.end(); RootI != RootE; ++RootI) { + for (ArrayRef::const_iterator + SI = SUnits.begin(), SE = SUnits.end(); SI != SE; ++SI) { + const SUnit *SU = &*SI; + if (Impl.isVisited(SU) || hasDataSucc(SU)) + continue; + SchedDAGReverseDFS DFS; - Impl.visitPreorder(*RootI); - DFS.follow(*RootI); + Impl.visitPreorder(SU); + DFS.follow(SU); for (;;) { // Traverse the leftmost path as far as possible. while (DFS.getPred() != DFS.getPredEnd()) { const SDep &PredDep = *DFS.getPred(); DFS.advance(); // Ignore non-data edges. - if (PredDep.getKind() != SDep::Data) + if (PredDep.getKind() != SDep::Data + || PredDep.getSUnit()->isBoundaryNode()) { continue; + } // An already visited edge is a cross edge, assuming an acyclic DAG. if (Impl.isVisited(PredDep.getSUnit())) { Impl.visitCrossEdge(PredDep, DFS.getCurr());
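The fixupKills pass added by this patch recomputes kill flags by walking each basic block from bottom to top while tracking which registers are live below the current instruction. The following standalone sketch illustrates only that backward-liveness idea, under stated assumptions: Instr and Operand are hypothetical stand-ins for MachineInstr/MachineOperand, the register universe is fixed at 8, and sub-registers, reserved registers, register masks, and block live-outs (startBlockForKills) are ignored. It is not LLVM's implementation, just the core algorithm in miniature.

// Standalone sketch of the backward kill-flag recomputation idea used by
// ScheduleDAGInstrs::fixupKills. Instr/Operand are toy stand-ins, not LLVM API.
#include <bitset>
#include <cstdio>
#include <vector>

constexpr unsigned NumRegs = 8;

struct Operand {
  unsigned Reg;   // register number, 1..NumRegs-1 (0 means "no register")
  bool IsDef;     // true: this operand defines Reg; false: it uses Reg
  bool IsKill;    // for uses: last use of the current value of Reg
};

struct Instr {
  const char *Name;
  std::vector<Operand> Ops;
};

// Recompute kill flags for one block, assuming no register is live out.
void fixupKills(std::vector<Instr> &Block) {
  std::bitset<NumRegs> LiveRegs;   // registers live below the current instr
  for (auto I = Block.rbegin(), E = Block.rend(); I != E; ++I) {
    // Defs end the live range seen so far: the defined value is not live above.
    for (Operand &Op : I->Ops)
      if (Op.IsDef && Op.Reg)
        LiveRegs.reset(Op.Reg);

    // A use kills the register if its value is not needed below this
    // instruction; mark at most one use per register per instruction.
    std::bitset<NumRegs> KilledHere;
    for (Operand &Op : I->Ops) {
      if (Op.IsDef || !Op.Reg)
        continue;
      Op.IsKill = !LiveRegs.test(Op.Reg) && !KilledHere.test(Op.Reg);
      KilledHere.set(Op.Reg);
    }

    // Uses make the register live above this instruction.
    for (Operand &Op : I->Ops)
      if (!Op.IsDef && Op.Reg)
        LiveRegs.set(Op.Reg);
  }
}

int main() {
  // r1 = ...; r2 = add r1, r1; use r2   -- r1 dies at the add, r2 at the use.
  std::vector<Instr> Block = {
      {"def r1", {{1, true, false}}},
      {"r2 = add r1, r1", {{2, true, false}, {1, false, false}, {1, false, false}}},
      {"use r2", {{2, false, false}}},
  };
  fixupKills(Block);
  for (const Instr &I : Block)
    for (const Operand &Op : I.Ops)
      if (!Op.IsDef)
        std::printf("%-16s r%u%s\n", I.Name, Op.Reg, Op.IsKill ? " <kill>" : "");
  return 0;
}

As in the patch, only the first use of a register within an instruction is marked as a kill; the real code additionally consults sub-register liveness and may add implicit defs via toggleKillFlag when only some sub-registers remain live.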