X-Git-Url: http://demsky.eecs.uci.edu/git/?a=blobdiff_plain;f=lib%2FCodeGen%2FSelectionDAG%2FScheduleDAGRRList.cpp;h=6ea7c80687820ec878251f7c200c7edaaed91816;hb=4ee451de366474b9c228b4e5fa573795a715216d;hp=61a005ce10ed6342587dfc81cc63f2e44f2ca235;hpb=42d60274eaa70f8cdbed76d04d25d7a8fc1237cb;p=oota-llvm.git diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp index 61a005ce10e..6ea7c806878 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGRRList.cpp @@ -2,8 +2,8 @@ // // The LLVM Compiler Infrastructure // -// This file was developed by Evan Cheng and is distributed under the -// University of Illinois Open Source License. See LICENSE.TXT for details. +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // @@ -25,6 +25,7 @@ #include "llvm/Target/TargetInstrInfo.h" #include "llvm/Support/Debug.h" #include "llvm/Support/Compiler.h" +#include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallSet.h" #include "llvm/ADT/Statistic.h" #include @@ -32,6 +33,11 @@ #include "llvm/Support/CommandLine.h" using namespace llvm; +STATISTIC(NumBacktracks, "Number of times scheduler backtraced"); +STATISTIC(NumUnfolds, "Number of nodes unfolded"); +STATISTIC(NumDups, "Number of duplicated nodes"); +STATISTIC(NumCCCopies, "Number of cross class copies"); + static RegisterScheduler burrListDAGScheduler("list-burr", " Bottom-up register reduction list scheduling", @@ -86,10 +92,11 @@ private: void UnscheduleNodeBottomUp(SUnit*); void BacktrackBottomUp(SUnit*, unsigned, unsigned&); SUnit *CopyAndMoveSuccessors(SUnit*); - SUnit *InsertCopiesAndMoveSuccs(SUnit*, unsigned, + void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned, + const TargetRegisterClass*, const TargetRegisterClass*, - const TargetRegisterClass*); - bool DelayForLiveRegsBottomUp(SUnit*, unsigned&); + SmallVector&); + bool DelayForLiveRegsBottomUp(SUnit*, SmallVector&); void ListScheduleTopDown(); void ListScheduleBottomUp(); void CommuteNodesToReducePressure(); @@ -149,7 +156,7 @@ void ScheduleDAGRRList::CommuteNodesToReducePressure() { continue; SDNode *OpN = SU->Node->getOperand(j).Val; - SUnit *OpSU = SUnitMap[OpN][SU->InstanceNo]; + SUnit *OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo]; if (OpSU && OperandSeen.count(OpSU) == 1) { // Ok, so SU is not the last use of OpSU, but SU is two-address so // it will clobber OpSU. Try to commute SU if no other source operands @@ -158,7 +165,7 @@ void ScheduleDAGRRList::CommuteNodesToReducePressure() { for (unsigned k = 0; k < NumOps; ++k) { if (k != j) { OpN = SU->Node->getOperand(k).Val; - OpSU = SUnitMap[OpN][SU->InstanceNo]; + OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo]; if (OpSU && OperandSeen.count(OpSU) == 1) { DoCommute = false; break; @@ -196,13 +203,10 @@ void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain, // interrupt model (drain vs. freeze). PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency); - if (!isChain) - --PredSU->NumSuccsLeft; - else - --PredSU->NumChainSuccsLeft; + --PredSU->NumSuccsLeft; #ifndef NDEBUG - if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) { + if (PredSU->NumSuccsLeft < 0) { cerr << "*** List scheduling failed! 
***\n"; PredSU->dump(&DAG); cerr << " has been released too many times!\n"; @@ -210,7 +214,7 @@ void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain, } #endif - if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) { + if (PredSU->NumSuccsLeft == 0) { // EntryToken has to go last! Special case it here. if (!PredSU->Node || PredSU->Node->getOpcode() != ISD::EntryToken) { PredSU->isAvailable = true; @@ -281,10 +285,7 @@ void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) { AvailableQueue->remove(PredSU); } - if (!isChain) - ++PredSU->NumSuccsLeft; - else - ++PredSU->NumChainSuccsLeft; + ++PredSU->NumSuccsLeft; } /// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and @@ -326,6 +327,40 @@ void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) { AvailableQueue->push(SU); } +// FIXME: This is probably too slow! +static void isReachable(SUnit *SU, SUnit *TargetSU, + SmallPtrSet &Visited, bool &Reached) { + if (Reached) return; + if (SU == TargetSU) { + Reached = true; + return; + } + if (!Visited.insert(SU)) return; + + for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E; + ++I) + isReachable(I->Dep, TargetSU, Visited, Reached); +} + +static bool isReachable(SUnit *SU, SUnit *TargetSU) { + SmallPtrSet Visited; + bool Reached = false; + isReachable(SU, TargetSU, Visited, Reached); + return Reached; +} + +/// willCreateCycle - Returns true if adding an edge from SU to TargetSU will +/// create a cycle. +static bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) { + if (isReachable(TargetSU, SU)) + return true; + for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); + I != E; ++I) + if (I->Cost < 0 && isReachable(TargetSU, I->Dep)) + return true; + return false; +} + /// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in /// BTCycle in order to schedule a specific node. Returns the last unscheduled /// SUnit. Also returns if a successor is unscheduled in the process. @@ -347,34 +382,159 @@ void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle, assert(false && "Something is wrong!"); abort(); } + + ++NumBacktracks; } -/// isSafeToCopy - True if the SUnit for the given SDNode can safely cloned, -/// i.e. the node does not produce a flag, it does not read a flag and it does -/// not have an incoming chain. -static bool isSafeToCopy(SDNode *N) { - if (!N) - return true; +/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled +/// successors to the newly created node. 
+SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) { + if (SU->FlaggedNodes.size()) + return NULL; - for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) - if (N->getValueType(i) == MVT::Flag) - return false; + SDNode *N = SU->Node; + if (!N) + return NULL; + + SUnit *NewSU; + bool TryUnfold = false; + for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) { + MVT::ValueType VT = N->getValueType(i); + if (VT == MVT::Flag) + return NULL; + else if (VT == MVT::Other) + TryUnfold = true; + } for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) { const SDOperand &Op = N->getOperand(i); MVT::ValueType VT = Op.Val->getValueType(Op.ResNo); - if (VT == MVT::Other || VT == MVT::Flag) - return false; + if (VT == MVT::Flag) + return NULL; } - return true; -} + if (TryUnfold) { + SmallVector NewNodes; + if (!MRI->unfoldMemoryOperand(DAG, N, NewNodes)) + return NULL; + + DOUT << "Unfolding SU # " << SU->NodeNum << "\n"; + assert(NewNodes.size() == 2 && "Expected a load folding node!"); + + N = NewNodes[1]; + SDNode *LoadNode = NewNodes[0]; + unsigned NumVals = N->getNumValues(); + unsigned OldNumVals = SU->Node->getNumValues(); + for (unsigned i = 0; i != NumVals; ++i) + DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i)); + DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1), + SDOperand(LoadNode, 1)); + + SUnit *NewSU = NewSUnit(N); + SUnitMap[N].push_back(NewSU); + const TargetInstrDescriptor *TID = &TII->get(N->getTargetOpcode()); + for (unsigned i = 0; i != TID->numOperands; ++i) { + if (TID->getOperandConstraint(i, TOI::TIED_TO) != -1) { + NewSU->isTwoAddress = true; + break; + } + } + if (TID->Flags & M_COMMUTABLE) + NewSU->isCommutable = true; + // FIXME: Calculate height / depth and propagate the changes? + NewSU->Depth = SU->Depth; + NewSU->Height = SU->Height; + ComputeLatency(NewSU); + + // LoadNode may already exist. This can happen when there is another + // load from the same location and producing the same type of value + // but it has different alignment or volatileness. + bool isNewLoad = true; + SUnit *LoadSU; + DenseMap >::iterator SMI = + SUnitMap.find(LoadNode); + if (SMI != SUnitMap.end()) { + LoadSU = SMI->second.front(); + isNewLoad = false; + } else { + LoadSU = NewSUnit(LoadNode); + SUnitMap[LoadNode].push_back(LoadSU); -/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled -/// successors to the newly created node. 
-SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) { - DOUT << "Duplicating SU # " << SU->NodeNum << "\n"; + LoadSU->Depth = SU->Depth; + LoadSU->Height = SU->Height; + ComputeLatency(LoadSU); + } - SUnit *NewSU = Clone(SU); + SUnit *ChainPred = NULL; + SmallVector ChainSuccs; + SmallVector LoadPreds; + SmallVector NodePreds; + SmallVector NodeSuccs; + for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); + I != E; ++I) { + if (I->isCtrl) + ChainPred = I->Dep; + else if (I->Dep->Node && I->Dep->Node->isOperand(LoadNode)) + LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false)); + else + NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false)); + } + for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); + I != E; ++I) { + if (I->isCtrl) + ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost, + I->isCtrl, I->isSpecial)); + else + NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost, + I->isCtrl, I->isSpecial)); + } + + SU->removePred(ChainPred, true, false); + if (isNewLoad) + LoadSU->addPred(ChainPred, true, false); + for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) { + SDep *Pred = &LoadPreds[i]; + SU->removePred(Pred->Dep, Pred->isCtrl, Pred->isSpecial); + if (isNewLoad) + LoadSU->addPred(Pred->Dep, Pred->isCtrl, Pred->isSpecial, + Pred->Reg, Pred->Cost); + } + for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) { + SDep *Pred = &NodePreds[i]; + SU->removePred(Pred->Dep, Pred->isCtrl, Pred->isSpecial); + NewSU->addPred(Pred->Dep, Pred->isCtrl, Pred->isSpecial, + Pred->Reg, Pred->Cost); + } + for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) { + SDep *Succ = &NodeSuccs[i]; + Succ->Dep->removePred(SU, Succ->isCtrl, Succ->isSpecial); + Succ->Dep->addPred(NewSU, Succ->isCtrl, Succ->isSpecial, + Succ->Reg, Succ->Cost); + } + for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) { + SDep *Succ = &ChainSuccs[i]; + Succ->Dep->removePred(SU, Succ->isCtrl, Succ->isSpecial); + if (isNewLoad) + Succ->Dep->addPred(LoadSU, Succ->isCtrl, Succ->isSpecial, + Succ->Reg, Succ->Cost); + } + if (isNewLoad) + NewSU->addPred(LoadSU, false, false); + + if (isNewLoad) + AvailableQueue->addNode(LoadSU); + AvailableQueue->addNode(NewSU); + + ++NumUnfolds; + + if (NewSU->NumSuccsLeft == 0) { + NewSU->isAvailable = true; + return NewSU; + } + SU = NewSU; + } + + DOUT << "Duplicating SU # " << SU->NodeNum << "\n"; + NewSU = Clone(SU); // New SUnit has the exact same predecessors. for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); @@ -386,34 +546,36 @@ SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) { // Only copy scheduled successors. Cut them from old node's successor // list and move them over. 
- SmallVector DelDeps; + SmallVector, 4> DelDeps; for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); I != E; ++I) { if (I->isSpecial) continue; - NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1); if (I->Dep->isScheduled) { + NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1); I->Dep->addPred(NewSU, I->isCtrl, false, I->Reg, I->Cost); - DelDeps.push_back(I); + DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl)); } } for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) { - SUnit *Succ = DelDeps[i]->Dep; - bool isCtrl = DelDeps[i]->isCtrl; + SUnit *Succ = DelDeps[i].first; + bool isCtrl = DelDeps[i].second; Succ->removePred(SU, isCtrl, false); } AvailableQueue->updateNode(SU); AvailableQueue->addNode(NewSU); + ++NumDups; return NewSU; } -/// InsertCopiesAndMoveSuccs - Insert expensive cross register class copies and -/// move all scheduled successors of the given SUnit to the last copy. -SUnit *ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg, - const TargetRegisterClass *DestRC, - const TargetRegisterClass *SrcRC) { +/// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies +/// and move all scheduled successors of the given SUnit to the last copy. +void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg, + const TargetRegisterClass *DestRC, + const TargetRegisterClass *SrcRC, + SmallVector &Copies) { SUnit *CopyFromSU = NewSUnit(NULL); CopyFromSU->CopySrcRC = SrcRC; CopyFromSU->CopyDstRC = DestRC; @@ -426,20 +588,20 @@ SUnit *ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg, // Only copy scheduled successors. Cut them from old node's successor // list and move them over. - SmallVector DelDeps; + SmallVector, 4> DelDeps; for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); I != E; ++I) { if (I->isSpecial) continue; - CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1); if (I->Dep->isScheduled) { + CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1); I->Dep->addPred(CopyToSU, I->isCtrl, false, I->Reg, I->Cost); - DelDeps.push_back(I); + DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl)); } } for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) { - SUnit *Succ = DelDeps[i]->Dep; - bool isCtrl = DelDeps[i]->isCtrl; + SUnit *Succ = DelDeps[i].first; + bool isCtrl = DelDeps[i].second; Succ->removePred(SU, isCtrl, false); } @@ -449,8 +611,10 @@ SUnit *ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg, AvailableQueue->updateNode(SU); AvailableQueue->addNode(CopyFromSU); AvailableQueue->addNode(CopyToSU); + Copies.push_back(CopyFromSU); + Copies.push_back(CopyToSU); - return CopyToSU; + ++NumCCCopies; } /// getPhysicalRegisterVT - Returns the ValueType of the physical register @@ -469,51 +633,31 @@ static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg, return N->getValueType(NumRes); } -// FIXME: This is probably too slow! 
-static void isReachable(SUnit *SU, SUnit *TargetSU, - SmallPtrSet &Visited, bool &Reached) { - if (Reached) return; - if (SU == TargetSU) { - Reached = true; - return; - } - if (!Visited.insert(SU)) return; - - for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E; - ++I) - isReachable(I->Dep, TargetSU, Visited, Reached); -} - -static bool isReachable(SUnit *SU, SUnit *TargetSU) { - SmallPtrSet Visited; - bool Reached = false; - isReachable(SU, TargetSU, Visited, Reached); - return Reached; -} - /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay /// scheduling of the given node to satisfy live physical register dependencies. /// If the specific node is the last one that's available to schedule, do /// whatever is necessary (i.e. backtracking or cloning) to make it possible. -bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU, unsigned &CurCycle){ +bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU, + SmallVector &LRegs){ if (LiveRegs.empty()) return false; + SmallSet RegAdded; // If this node would clobber any "live" register, then it's not ready. - // However, if this is the last "available" node, then we may have to - // backtrack. - bool MustSched = AvailableQueue->empty(); - SmallVector LRegs; for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) { if (I->Cost < 0) { unsigned Reg = I->Reg; - if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) - LRegs.push_back(Reg); + if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) { + if (RegAdded.insert(Reg)) + LRegs.push_back(Reg); + } for (const unsigned *Alias = MRI->getAliasSet(Reg); *Alias; ++Alias) - if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) - LRegs.push_back(*Alias); + if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) { + if (RegAdded.insert(*Alias)) + LRegs.push_back(*Alias); + } } } @@ -525,77 +669,22 @@ bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU, unsigned &CurCycle){ if (!TID.ImplicitDefs) continue; for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) { - if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) - LRegs.push_back(*Reg); + if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) { + if (RegAdded.insert(*Reg)) + LRegs.push_back(*Reg); + } for (const unsigned *Alias = MRI->getAliasSet(*Reg); *Alias; ++Alias) - if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) - LRegs.push_back(*Alias); - } - } - - if (MustSched && !LRegs.empty()) { - // We have made a mistake by scheduling some nodes too early. Now we must - // schedule the current node which will end up clobbering some live - // registers that are expensive / impossible to copy. Try unscheduling - // up to the point where it's safe to schedule the current node. - unsigned LiveCycle = CurCycle; - for (unsigned i = 0, e = LRegs.size(); i != e; ++i) { - unsigned Reg = LRegs[i]; - unsigned LCycle = LiveRegCycles[Reg]; - LiveCycle = std::min(LiveCycle, LCycle); - } - - SUnit *OldSU = Sequence[LiveCycle]; - if (!isReachable(Sequence[LiveCycle], SU)) { - // If CycleBound is greater than backtrack cycle, then some of SU - // successors are going to be unscheduled. - bool SuccUnsched = SU->CycleBound > LiveCycle; - BacktrackBottomUp(SU, LiveCycle, CurCycle); - // Force the current node to be scheduled before the node that - // requires the physical reg dep. 
- if (OldSU->isAvailable) { - OldSU->isAvailable = false; - AvailableQueue->remove(OldSU); - } - SU->addPred(OldSU, true, true); - // If a successor has been unscheduled, then it's not possible to - // schedule the current node. - return SuccUnsched; - } else { - // Try duplicating the nodes that produces these "expensive to copy" - // values to break the dependency. - assert(LRegs.size() == 1 && "Can't handle this yet!"); - unsigned Reg = LRegs[0]; - SUnit *LRDef = LiveRegDefs[Reg]; - SUnit *NewDef; - if (isSafeToCopy(LRDef->Node)) - NewDef = CopyAndMoveSuccessors(LRDef); - else { - // Issue expensive cross register class copies. - MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII); - const TargetRegisterClass *RC = - MRI->getPhysicalRegisterRegClass(VT, Reg); - const TargetRegisterClass *DestRC = MRI->getCrossCopyRegClass(RC); - if (!DestRC) { - assert(false && "Don't know how to copy this physical register!"); - abort(); + if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) { + if (RegAdded.insert(*Alias)) + LRegs.push_back(*Alias); } - NewDef = InsertCopiesAndMoveSuccs(LRDef,Reg,DestRC,RC); - } - - DOUT << "Adding an edge from SU # " << SU->NodeNum - << " to SU #" << NewDef->NodeNum << "\n"; - LiveRegDefs[Reg] = NewDef; - NewDef->addPred(SU, true, true); - SU->isAvailable = false; - AvailableQueue->push(NewDef); - return true; } } return !LRegs.empty(); } + /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up /// schedulers. void ScheduleDAGRRList::ListScheduleBottomUp() { @@ -609,24 +698,109 @@ void ScheduleDAGRRList::ListScheduleBottomUp() { // priority. If it is not ready put it back. Schedule the node. SmallVector NotReady; while (!AvailableQueue->empty()) { + bool Delayed = false; + DenseMap > LRegsMap; SUnit *CurSU = AvailableQueue->pop(); while (CurSU) { - if (CurSU->CycleBound <= CurCycle) - if (!DelayForLiveRegsBottomUp(CurSU, CurCycle)) + if (CurSU->CycleBound <= CurCycle) { + SmallVector LRegs; + if (!DelayForLiveRegsBottomUp(CurSU, LRegs)) break; - - // Verify node is still ready. It may not be in case the - // scheduler has backtracked. - if (CurSU->isAvailable) { - CurSU->isPending = true; - NotReady.push_back(CurSU); + Delayed = true; + LRegsMap.insert(std::make_pair(CurSU, LRegs)); } + + CurSU->isPending = true; // This SU is not in AvailableQueue right now. + NotReady.push_back(CurSU); CurSU = AvailableQueue->pop(); } - + + // All candidates are delayed due to live physical reg dependencies. + // Try backtracking, code duplication, or inserting cross class copies + // to resolve it. + if (Delayed && !CurSU) { + for (unsigned i = 0, e = NotReady.size(); i != e; ++i) { + SUnit *TrySU = NotReady[i]; + SmallVector &LRegs = LRegsMap[TrySU]; + + // Try unscheduling up to the point where it's safe to schedule + // this node. + unsigned LiveCycle = CurCycle; + for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) { + unsigned Reg = LRegs[j]; + unsigned LCycle = LiveRegCycles[Reg]; + LiveCycle = std::min(LiveCycle, LCycle); + } + SUnit *OldSU = Sequence[LiveCycle]; + if (!WillCreateCycle(TrySU, OldSU)) { + BacktrackBottomUp(TrySU, LiveCycle, CurCycle); + // Force the current node to be scheduled before the node that + // requires the physical reg dep. + if (OldSU->isAvailable) { + OldSU->isAvailable = false; + AvailableQueue->remove(OldSU); + } + TrySU->addPred(OldSU, true, true); + // If one or more successors has been unscheduled, then the current + // node is no longer avaialable. Schedule a successor that's now + // available instead. 
+ if (!TrySU->isAvailable) + CurSU = AvailableQueue->pop(); + else { + CurSU = TrySU; + TrySU->isPending = false; + NotReady.erase(NotReady.begin()+i); + } + break; + } + } + + if (!CurSU) { + // Can't backtrace. Try duplicating the nodes that produces these + // "expensive to copy" values to break the dependency. In case even + // that doesn't work, insert cross class copies. + SUnit *TrySU = NotReady[0]; + SmallVector &LRegs = LRegsMap[TrySU]; + assert(LRegs.size() == 1 && "Can't handle this yet!"); + unsigned Reg = LRegs[0]; + SUnit *LRDef = LiveRegDefs[Reg]; + SUnit *NewDef = CopyAndMoveSuccessors(LRDef); + if (!NewDef) { + // Issue expensive cross register class copies. + MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII); + const TargetRegisterClass *RC = + MRI->getPhysicalRegisterRegClass(VT, Reg); + const TargetRegisterClass *DestRC = MRI->getCrossCopyRegClass(RC); + if (!DestRC) { + assert(false && "Don't know how to copy this physical register!"); + abort(); + } + SmallVector Copies; + InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies); + DOUT << "Adding an edge from SU # " << TrySU->NodeNum + << " to SU #" << Copies.front()->NodeNum << "\n"; + TrySU->addPred(Copies.front(), true, true); + NewDef = Copies.back(); + } + + DOUT << "Adding an edge from SU # " << NewDef->NodeNum + << " to SU #" << TrySU->NodeNum << "\n"; + LiveRegDefs[Reg] = NewDef; + NewDef->addPred(TrySU, true, true); + TrySU->isAvailable = false; + CurSU = NewDef; + } + + if (!CurSU) { + assert(false && "Unable to resolve live physical register dependencies!"); + abort(); + } + } + // Add the nodes that aren't ready back onto the available list. for (unsigned i = 0, e = NotReady.size(); i != e; ++i) { NotReady[i]->isPending = false; + // May no longer be available due to backtracking. if (NotReady[i]->isAvailable) AvailableQueue->push(NotReady[i]); } @@ -655,7 +829,7 @@ void ScheduleDAGRRList::ListScheduleBottomUp() { // Verify that all SUnits were scheduled. bool AnyNotSched = false; for (unsigned i = 0, e = SUnits.size(); i != e; ++i) { - if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) { + if (SUnits[i].NumSuccsLeft != 0) { if (!AnyNotSched) cerr << "*** List scheduling failed! ***\n"; SUnits[i].dump(&DAG); @@ -681,13 +855,10 @@ void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain, // interrupt model (drain vs. freeze). SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency); - if (!isChain) - --SuccSU->NumPredsLeft; - else - --SuccSU->NumChainPredsLeft; + --SuccSU->NumPredsLeft; #ifndef NDEBUG - if (SuccSU->NumPredsLeft < 0 || SuccSU->NumChainPredsLeft < 0) { + if (SuccSU->NumPredsLeft < 0) { cerr << "*** List scheduling failed! 
***\n"; SuccSU->dump(&DAG); cerr << " has been released too many times!\n"; @@ -695,7 +866,7 @@ void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain, } #endif - if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) { + if (SuccSU->NumPredsLeft == 0) { SuccSU->isAvailable = true; AvailableQueue->push(SuccSU); } @@ -893,9 +1064,11 @@ namespace { std::vector SethiUllmanNumbers; const TargetInstrInfo *TII; + const MRegisterInfo *MRI; public: - explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii) - : TII(tii) {} + explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii, + const MRegisterInfo *mri) + : TII(tii), MRI(mri) {} void initNodes(DenseMap > &sumap, std::vector &sunits) { @@ -934,6 +1107,11 @@ namespace { // CopyToReg should be close to its uses to facilitate coalescing and // avoid spilling. return 0; + else if (Opc == TargetInstrInfo::EXTRACT_SUBREG || + Opc == TargetInstrInfo::INSERT_SUBREG) + // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to + // facilitate coalescing. + return 0; else if (SU->NumSuccs == 0) // If SU does not have a use, i.e. it doesn't produce a value that would // be consumed (e.g. store), then it terminates a chain of computation. @@ -1030,13 +1208,13 @@ static unsigned calcMaxScratches(const SUnit *SU) { for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) { if (I->isCtrl) continue; // ignore chain preds - if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg) + if (I->Dep->Node->getOpcode() != ISD::CopyFromReg) Scratches++; } for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); I != E; ++I) { if (I->isCtrl) continue; // ignore chain succs - if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg) + if (I->Dep->Node->getOpcode() != ISD::CopyToReg) Scratches += 10; } return Scratches; @@ -1115,7 +1293,8 @@ bool BURegReductionPriorityQueue::canClobber(SUnit *SU, SUnit *Op) { for (unsigned i = 0; i != NumOps; ++i) { if (TII->getOperandConstraint(Opc, i+NumRes, TOI::TIED_TO) != -1) { SDNode *DU = SU->Node->getOperand(i).Val; - if (Op == (*SUnitMap)[DU][SU->InstanceNo]) + if ((*SUnitMap).find(DU) != (*SUnitMap).end() && + Op == (*SUnitMap)[DU][SU->InstanceNo]) return true; } } @@ -1124,10 +1303,53 @@ bool BURegReductionPriorityQueue::canClobber(SUnit *SU, SUnit *Op) { } +/// hasCopyToRegUse - Return true if SU has a value successor that is a +/// CopyToReg node. +static bool hasCopyToRegUse(SUnit *SU) { + for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end(); + I != E; ++I) { + if (I->isCtrl) continue; + SUnit *SuccSU = I->Dep; + if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg) + return true; + } + return false; +} + +/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's +/// physical register def. 
+static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU, + const TargetInstrInfo *TII, + const MRegisterInfo *MRI) { + SDNode *N = SuccSU->Node; + unsigned NumDefs = TII->getNumDefs(N->getTargetOpcode()); + const unsigned *ImpDefs = TII->getImplicitDefs(N->getTargetOpcode()); + if (!ImpDefs) + return false; + const unsigned *SUImpDefs = TII->getImplicitDefs(SU->Node->getTargetOpcode()); + if (!SUImpDefs) + return false; + for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) { + MVT::ValueType VT = N->getValueType(i); + if (VT == MVT::Flag || VT == MVT::Other) + continue; + unsigned Reg = ImpDefs[i - NumDefs]; + for (;*SUImpDefs; ++SUImpDefs) { + unsigned SUReg = *SUImpDefs; + if (MRI->regsOverlap(Reg, SUReg)) + return true; + } + } + return false; +} + /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses /// it as a def&use operand. Add a pseudo control edge from it to the other /// node (if it won't create a cycle) so the two-address one will be scheduled -/// first (lower in the schedule). +/// first (lower in the schedule). If both nodes are two-address, favor the +/// one that has a CopyToReg use (more likely to be a loop induction update). +/// If both are two-address, but one is commutable while the other is not +/// commutable, favor the one that's not commutable. template void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() { for (unsigned i = 0, e = SUnits->size(); i != e; ++i) { @@ -1136,7 +1358,7 @@ void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() { continue; SDNode *Node = SU->Node; - if (!Node || !Node->isTargetOpcode()) + if (!Node || !Node->isTargetOpcode() || SU->FlaggedNodes.size() > 0) continue; unsigned Opc = Node->getTargetOpcode(); @@ -1145,20 +1367,36 @@ void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() { for (unsigned j = 0; j != NumOps; ++j) { if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) != -1) { SDNode *DU = SU->Node->getOperand(j).Val; + if ((*SUnitMap).find(DU) == (*SUnitMap).end()) + continue; SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo]; if (!DUSU) continue; for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end(); I != E; ++I) { if (I->isCtrl) continue; SUnit *SuccSU = I->Dep; - // Don't constraint nodes with implicit defs. It can create cycles - // plus it may increase register pressures. - if (SuccSU == SU || SuccSU->hasImplicitDefs) + if (SuccSU == SU) + continue; + // Be conservative. Ignore if nodes aren't at roughly the same + // depth and height. + if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1) continue; - // Be conservative. Ignore if nodes aren't at the same depth. - if (SuccSU->Depth != SU->Depth) + if (!SuccSU->Node || !SuccSU->Node->isTargetOpcode()) + continue; + // Don't constrain nodes with physical register defs if the + // predecessor can cloober them. + if (SuccSU->hasPhysRegDefs) { + if (canClobberPhysRegDefs(SuccSU, SU, TII, MRI)) + continue; + } + // Don't constraint extract_subreg / insert_subreg these may be + // coalesced away. We don't them close to their uses. 
+          unsigned SuccOpc = SuccSU->Node->getTargetOpcode();
+          if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
+              SuccOpc == TargetInstrInfo::INSERT_SUBREG)
             continue;
           if ((!canClobber(SuccSU, DUSU) ||
+               (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
                (!SU->isCommutable && SuccSU->isCommutable)) &&
               !isReachable(SuccSU, SU)) {
             DOUT << "Adding an edge from SU # " << SU->NodeNum
@@ -1340,8 +1578,9 @@ llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                                     SelectionDAG *DAG,
                                                     MachineBasicBlock *BB) {
   const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
+  const MRegisterInfo *MRI = DAG->getTarget().getRegisterInfo();
   return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true,
-                  new BURegReductionPriorityQueue(TII));
+                  new BURegReductionPriorityQueue(TII, MRI));
 }
 
 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
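
The backtracking and node-duplication logic in this patch leans on the isReachable / WillCreateCycle helpers: a depth-first walk over predecessor edges that answers whether forcing a new dependence edge would close a cycle in the scheduling DAG. Below is a minimal standalone sketch of that idea, not the code from this patch: MiniSUnit, wouldCreateCycle, and std::set are illustrative stand-ins for LLVM's SUnit, WillCreateCycle, and SmallPtrSet, and the special handling of physical-register (negative-cost) predecessor edges is omitted.

// Sketch only: a toy DAG node with a predecessor list standing in for SUnit.
#include <cstdio>
#include <set>
#include <vector>

struct MiniSUnit {
  int Num;                          // node id, used only for printing
  std::vector<MiniSUnit*> Preds;    // nodes this node depends on
  explicit MiniSUnit(int N) : Num(N) {}
};

// Depth-first walk over predecessor edges: true if TargetSU is SU itself or
// one of SU's transitive predecessors.
static bool isReachable(MiniSUnit *SU, MiniSUnit *TargetSU,
                        std::set<MiniSUnit*> &Visited) {
  if (SU == TargetSU)
    return true;
  if (!Visited.insert(SU).second)   // already explored this node
    return false;
  for (unsigned i = 0, e = SU->Preds.size(); i != e; ++i)
    if (isReachable(SU->Preds[i], TargetSU, Visited))
      return true;
  return false;
}

static bool isReachable(MiniSUnit *SU, MiniSUnit *TargetSU) {
  std::set<MiniSUnit*> Visited;
  return isReachable(SU, TargetSU, Visited);
}

// Making NewPred an extra predecessor of SU closes a cycle exactly when SU is
// already a transitive predecessor of NewPred.
static bool wouldCreateCycle(MiniSUnit *SU, MiniSUnit *NewPred) {
  return isReachable(NewPred, SU);
}

int main() {
  MiniSUnit A(0), B(1), C(2);
  B.Preds.push_back(&A);            // B depends on A
  C.Preds.push_back(&B);            // C depends on B, so A is an ancestor of C

  std::printf("A as extra pred of C: cycle? %s\n",
              wouldCreateCycle(&C, &A) ? "yes" : "no");   // no
  std::printf("C as extra pred of A: cycle? %s\n",
              wouldCreateCycle(&A, &C) ? "yes" : "no");   // yes
  return 0;
}

Each query is a full DFS over the predecessor graph, which is why the helper carries the "FIXME: This is probably too slow!" note in the patch; the real WillCreateCycle also re-checks reachability through the node's physical-register (negative-cost) predecessors before the scheduler commits to backtracking.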