+/// CapturePred - This does the opposite of ReleasePred. Since SU is being
+/// unscheduled, increase the number of successors left on its predecessors,
+/// and remove them from AvailableQueue if necessary.
+void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
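+  // Recompute PredSU's cycle bound from its remaining successors. The edge
+  // to SU is skipped because SU is the node being unscheduled.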
+ unsigned CycleBound = 0;
+ for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
+ I != E; ++I) {
+ if (I->Dep == SU)
+ continue;
+ CycleBound = std::max(CycleBound,
+ I->Dep->Cycle + PredSU->Latency);
+ }
+
+ if (PredSU->isAvailable) {
+ PredSU->isAvailable = false;
+ if (!PredSU->isPending)
+ AvailableQueue->remove(PredSU);
+ }
+
+ PredSU->CycleBound = CycleBound;
+ ++PredSU->NumSuccsLeft;
+}
+
+/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update
+/// its state and that of its predecessors to reflect the change.
+void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
+ DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
+ DEBUG(SU->dump(&DAG));
+
+ AvailableQueue->UnscheduledNode(SU);
+
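+  // Un-release each predecessor. If SU was the node that made a
+  // predecessor's physical register def live (its cycle matches
+  // LiveRegCycles[Reg]), unscheduling SU ends that liveness.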
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ CapturePred(I->Dep, SU, I->isCtrl);
+ if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
+ LiveRegs.erase(I->Reg);
+ assert(LiveRegDefs[I->Reg] == I->Dep &&
+ "Physical register dependency violated?");
+ LiveRegDefs[I->Reg] = NULL;
+ LiveRegCycles[I->Reg] = 0;
+ }
+ }
+
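+  // Conversely, SU may itself define physical registers that are consumed
+  // by already-scheduled successors; those registers become live again.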
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ if (I->Cost < 0) {
+ if (LiveRegs.insert(I->Reg)) {
+ assert(!LiveRegDefs[I->Reg] &&
+ "Physical register dependency violated?");
+ LiveRegDefs[I->Reg] = SU;
+ }
+ if (I->Dep->Cycle < LiveRegCycles[I->Reg])
+ LiveRegCycles[I->Reg] = I->Dep->Cycle;
+ }
+ }
+
+ SU->Cycle = 0;
+ SU->isScheduled = false;
+ SU->isAvailable = true;
+ AvailableQueue->push(SU);
+}
+
+/// IsReachable - Checks if SU is reachable from TargetSU.
+bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
+ // If insertion of the edge SU->TargetSU would create a cycle
+ // then there is a path from TargetSU to SU.
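+  // The ordering guarantees Ord(pred) < Ord(succ) for every edge, so a path
+  // from TargetSU to SU can only exist if Ord(TargetSU) < Ord(SU), and the
+  // DFS never needs to visit a node whose index exceeds Ord(SU).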
+ int UpperBound, LowerBound;
+ LowerBound = Node2Index[TargetSU->NodeNum];
+ UpperBound = Node2Index[SU->NodeNum];
+ bool HasLoop = false;
+ // Is Ord(TargetSU) < Ord(SU) ?
+ if (LowerBound < UpperBound) {
+ Visited.reset();
+ // There may be a path from TargetSU to SU. Check for it.
+ DFS(TargetSU, UpperBound, HasLoop);
+ }
+ return HasLoop;
+}
+
+/// Allocate - assign the topological index to the node n.
+inline void ScheduleDAGRRList::Allocate(int n, int index) {
+ Node2Index[n] = index;
+ Index2Node[index] = n;
+}
+
+/// InitDAGTopologicalSorting - create the initial topological
+/// ordering from the DAG to be scheduled.
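+///
+/// The ordering assigns each node an index such that Ord(X) < Ord(Y) for
+/// every edge X->Y, which makes reachability (and hence cycle) queries
+/// cheap: a path from X to Z implies Ord(X) < Ord(Z). The DFS and Shift
+/// methods below then keep the ordering up to date across edge insertions,
+/// in the style of the MNR algorithm for maintaining a topological order
+/// under edge insertions (Marchetti-Spaccamela, Nanni and Rohnert).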
+void ScheduleDAGRRList::InitDAGTopologicalSorting() {
+ unsigned DAGSize = SUnits.size();
+ std::vector<unsigned> InDegree(DAGSize);
+ std::vector<SUnit*> WorkList;
+ WorkList.reserve(DAGSize);
+ std::vector<SUnit*> TopOrder;
+ TopOrder.reserve(DAGSize);
+
+ // Initialize the data structures.
+ for (unsigned i = 0, e = DAGSize; i != e; ++i) {
+ SUnit *SU = &SUnits[i];
+ int NodeNum = SU->NodeNum;
+ unsigned Degree = SU->Succs.size();
+ InDegree[NodeNum] = Degree;
+
+    // Collect the leaf nodes, i.e. nodes without successors.
+    if (Degree == 0) {
+      assert(SU->Succs.empty() && "SUnit should have no successors");
+      WorkList.push_back(SU);
+ }
+ }
+
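+  // Bottom-up worklist pass: repeatedly take a node all of whose successors
+  // have already been taken, so TopOrder ends up in reverse topological
+  // order (sinks first).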
+ while (!WorkList.empty()) {
+ SUnit *SU = WorkList.back();
+ WorkList.pop_back();
+ TopOrder.push_back(SU);
+    for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+         I != E; ++I) {
+      SUnit *PredSU = I->Dep;
+      if (!--InDegree[PredSU->NodeNum])
+        // All of this predecessor's successors have been processed, so the
+        // predecessor itself can be processed now.
+        WorkList.push_back(PredSU);
+    }
+ }
+
+ // Second pass, assign the actual topological order as node ids.
+ int Id = 0;
+
+ Index2Node.clear();
+ Node2Index.clear();
+ Index2Node.resize(DAGSize);
+ Node2Index.resize(DAGSize);
+ Visited.resize(DAGSize);
+
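+  // TopOrder was built sinks-first, so walking it in reverse guarantees
+  // that every predecessor is numbered before (i.e. lower than) all of its
+  // successors.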
+  for (std::vector<SUnit*>::reverse_iterator TI = TopOrder.rbegin(),
+       TE = TopOrder.rend(); TI != TE; ++TI) {
+ Allocate((*TI)->NodeNum, Id);
+ Id++;
+ }
+
+#ifndef NDEBUG
+ // Check correctness of the ordering
+ for (unsigned i = 0, e = DAGSize; i != e; ++i) {
+ SUnit *SU = &SUnits[i];
+ for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
+ "Wrong topological sorting");
+ }
+ }
+#endif
+}
+
+/// AddPred - adds an edge from SUnit X to SUnit Y, i.e. makes X a
+/// predecessor of Y. Updates the topological ordering if required.
+bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
+ unsigned PhyReg, int Cost) {
+ int UpperBound, LowerBound;
+ LowerBound = Node2Index[Y->NodeNum];
+ UpperBound = Node2Index[X->NodeNum];
+ bool HasLoop = false;
+  // Is Ord(Y) < Ord(X)? If so, the new edge X->Y violates the current
+  // ordering and the affected region has to be renumbered.
+ if (LowerBound < UpperBound) {
+ // Update the topological order.
+ Visited.reset();
+ DFS(Y, UpperBound, HasLoop);
+ assert(!HasLoop && "Inserted edge creates a loop!");
+ // Recompute topological indexes.
+ Shift(Visited, LowerBound, UpperBound);
+ }
+ // Now really insert the edge.
+ return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
+}
+
+/// RemovePred - This removes the specified node N from the predecessors of
+/// the current node M. No update of the topological ordering is needed,
+/// since removing an edge can never invalidate an existing topological
+/// order.
+bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
+                                   bool isCtrl, bool isSpecial) {
+  return M->removePred(N, isCtrl, isSpecial);
+}
+
+/// DFS - Make a DFS traversal to mark all nodes reachable from SU that are
+/// affected by the edge insertion. These nodes will later get new
+/// topological indexes by means of the Shift method.
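+/// The traversal is iterative and confined to nodes whose topological index
+/// is below UpperBound; reaching the node at index UpperBound itself means
+/// the new edge closes a cycle, which is reported through HasLoop.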
+void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
+ std::vector<SUnit*> WorkList;
+ WorkList.reserve(SUnits.size());
+
+ WorkList.push_back(SU);
+ while (!WorkList.empty()) {
+ SU = WorkList.back();
+ WorkList.pop_back();
+ Visited.set(SU->NodeNum);
+ for (int I = SU->Succs.size()-1; I >= 0; --I) {
+ int s = SU->Succs[I].Dep->NodeNum;
+ if (Node2Index[s] == UpperBound) {
+ HasLoop = true;
+ return;
+ }
+      // Visit successors that haven't been visited yet and that lie inside
+      // the affected region.
+ if (!Visited.test(s) && Node2Index[s] < UpperBound) {
+ WorkList.push_back(SU->Succs[I].Dep);
+ }
+ }
+ }
+}
+
+/// Shift - Renumber the nodes so that the topological ordering is
+/// preserved.
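+///
+/// The first pass compacts all unvisited nodes in [LowerBound, UpperBound]
+/// downwards, collecting the visited ones in L; the second pass re-inserts
+/// the visited nodes after them, preserving their relative order. For
+/// example, with indexes 3..7 and the nodes at 4 and 6 visited, the
+/// unvisited nodes move to indexes 3..5 and the visited ones to 6..7.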
+void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
+ int UpperBound) {
+ std::vector<int> L;
+ int shift = 0;
+ int i;
+
+ for (i = LowerBound; i <= UpperBound; ++i) {
+ // w is node at topological index i.
+ int w = Index2Node[i];
+ if (Visited.test(w)) {
+ // Unmark.
+ Visited.reset(w);
+ L.push_back(w);
+ shift = shift + 1;
+ } else {
+ Allocate(w, i - shift);
+ }
+ }
+
+ for (unsigned j = 0; j < L.size(); ++j) {
+ Allocate(L[j], i - shift);
+ i = i + 1;
+ }
+}
+
+
+/// WillCreateCycle - Returns true if adding an edge from TargetSU to SU,
+/// i.e. making TargetSU a predecessor of SU, will create a cycle.
+bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
+ if (IsReachable(TargetSU, SU))
+ return true;
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I)
+ if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
+ return true;
+ return false;
+}
+
+/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
+/// BtCycle in order to schedule a specific node. CurCycle is updated through
+/// the reference to account for the unscheduled cycles.
+void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
+ unsigned &CurCycle) {
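+  // Pop nodes off the end of the scheduled sequence, one cycle at a time,
+  // and unschedule them until the target cycle is reached.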
+ SUnit *OldSU = NULL;
+ while (CurCycle > BtCycle) {
+ OldSU = Sequence.back();
+ Sequence.pop_back();
+ if (SU->isSucc(OldSU))
+ // Don't try to remove SU from AvailableQueue.
+ SU->isAvailable = false;
+ UnscheduleNodeBottomUp(OldSU);
+ --CurCycle;
+ }
+
+  if (SU->isSucc(OldSU)) {
+    assert(false && "OldSU is still a successor of SU!");
+    abort();
+  }
+
+ ++NumBacktracks;
+}
+
+/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
+/// successors to the newly created node.
+SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
+  if (!SU->FlaggedNodes.empty())
+    return NULL;
+
+ SDNode *N = SU->Node;
+ if (!N)
+ return NULL;
+
+ SUnit *NewSU;
+ bool TryUnfold = false;
+ for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
+ MVT VT = N->getValueType(i);
+ if (VT == MVT::Flag)
+ return NULL;
+ else if (VT == MVT::Other)
+ TryUnfold = true;
+ }
+ for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
+ const SDOperand &Op = N->getOperand(i);
+ MVT VT = Op.Val->getValueType(Op.ResNo);
+ if (VT == MVT::Flag)
+ return NULL;
+ }
+
+ if (TryUnfold) {
+ SmallVector<SDNode*, 2> NewNodes;
+ if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
+ return NULL;
+
+ DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
+ assert(NewNodes.size() == 2 && "Expected a load folding node!");
+
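+    // NewNodes[0] is the unfolded load; NewNodes[1] is the remaining
+    // operation, which now consumes the load's result instead of the folded
+    // memory operand.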
+ N = NewNodes[1];
+ SDNode *LoadNode = NewNodes[0];
+ unsigned NumVals = N->getNumValues();
+ unsigned OldNumVals = SU->Node->getNumValues();
+ for (unsigned i = 0; i != NumVals; ++i)
+ DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i));
+ DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1),
+ SDOperand(LoadNode, 1));
+
+    NewSU = CreateNewSUnit(N);
+ assert(N->getNodeId() == -1 && "Node already inserted!");
+ N->setNodeId(NewSU->NodeNum);
+
+ const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
+ for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
+ if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
+ NewSU->isTwoAddress = true;
+ break;
+ }
+ }
+ if (TID.isCommutable())
+ NewSU->isCommutable = true;
+ // FIXME: Calculate height / depth and propagate the changes?
+ NewSU->Depth = SU->Depth;
+ NewSU->Height = SU->Height;
+ ComputeLatency(NewSU);
+
+    // LoadNode may already exist. This can happen when there is another
+    // load from the same location that produces the same type of value,
+    // but with a different alignment or volatility.
+ bool isNewLoad = true;
+ SUnit *LoadSU;
+ if (LoadNode->getNodeId() != -1) {
+ LoadSU = &SUnits[LoadNode->getNodeId()];
+ isNewLoad = false;
+ } else {
+ LoadSU = CreateNewSUnit(LoadNode);
+ LoadNode->setNodeId(LoadSU->NodeNum);
+
+ LoadSU->Depth = SU->Depth;
+ LoadSU->Height = SU->Height;
+ ComputeLatency(LoadSU);
+ }
+
+ SUnit *ChainPred = NULL;
+ SmallVector<SDep, 4> ChainSuccs;
+ SmallVector<SDep, 4> LoadPreds;
+ SmallVector<SDep, 4> NodePreds;
+ SmallVector<SDep, 4> NodeSuccs;
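+    // Partition SU's edges: chain preds and succs stay with the load, preds
+    // that feed the load's operands become the load's preds, and all other
+    // edges are attached to the remaining operation.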
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->isCtrl)
+ ChainPred = I->Dep;
+ else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
+ LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
+ else
+ NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
+ }
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ if (I->isCtrl)
+ ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
+ I->isCtrl, I->isSpecial));
+ else
+ NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
+ I->isCtrl, I->isSpecial));
+ }
+
+ if (ChainPred) {
+ RemovePred(SU, ChainPred, true, false);
+ if (isNewLoad)
+ AddPred(LoadSU, ChainPred, true, false);
+ }
+ for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
+ SDep *Pred = &LoadPreds[i];
+ RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
+ if (isNewLoad) {
+ AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
+ Pred->Reg, Pred->Cost);
+ }
+ }
+ for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
+ SDep *Pred = &NodePreds[i];
+ RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
+ AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
+ Pred->Reg, Pred->Cost);
+ }
+ for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
+ SDep *Succ = &NodeSuccs[i];
+ RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
+ AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
+ Succ->Reg, Succ->Cost);
+ }
+ for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
+ SDep *Succ = &ChainSuccs[i];
+ RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
+ if (isNewLoad) {
+ AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
+ Succ->Reg, Succ->Cost);
+ }
+ }
+ if (isNewLoad) {
+ AddPred(NewSU, LoadSU, false, false);
+ }
+
+ if (isNewLoad)
+ AvailableQueue->addNode(LoadSU);
+ AvailableQueue->addNode(NewSU);
+
+ ++NumUnfolds;
+
+ if (NewSU->NumSuccsLeft == 0) {
+ NewSU->isAvailable = true;
+ return NewSU;
+ }
+ SU = NewSU;
+ }
+
+ DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
+ NewSU = CreateClone(SU);
+
+ // New SUnit has the exact same predecessors.
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I)
+ if (!I->isSpecial) {
+ AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
+ NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
+ }
+
+ // Only copy scheduled successors. Cut them from old node's successor
+ // list and move them over.
+ SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ if (I->isSpecial)
+ continue;
+ if (I->Dep->isScheduled) {
+ NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
+ AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
+ DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
+ }
+ }
+ for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
+ SUnit *Succ = DelDeps[i].first;
+ bool isCtrl = DelDeps[i].second;
+ RemovePred(Succ, SU, isCtrl, false);
+ }
+
+ AvailableQueue->updateNode(SU);
+ AvailableQueue->addNode(NewSU);
+
+ ++NumDups;
+ return NewSU;
+}
+
+/// InsertCCCopiesAndMoveSuccs - Insert expensive cross-register-class copies
+/// and move all scheduled successors of the given SUnit to the last copy.
+void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
+ const TargetRegisterClass *DestRC,
+ const TargetRegisterClass *SrcRC,
+ SmallVector<SUnit*, 2> &Copies) {
+ SUnit *CopyFromSU = CreateNewSUnit(NULL);
+ CopyFromSU->CopySrcRC = SrcRC;
+ CopyFromSU->CopyDstRC = DestRC;
+ CopyFromSU->Depth = SU->Depth;
+ CopyFromSU->Height = SU->Height;
+
+ SUnit *CopyToSU = CreateNewSUnit(NULL);
+ CopyToSU->CopySrcRC = DestRC;
+ CopyToSU->CopyDstRC = SrcRC;
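+  // CopyFromSU copies the value out of the physical register class SrcRC
+  // into DestRC, and CopyToSU copies it back into SrcRC; SU's scheduled
+  // successors are then moved onto CopyToSU.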
+
+ // Only copy scheduled successors. Cut them from old node's successor
+ // list and move them over.
+ SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
+ for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
+ I != E; ++I) {
+ if (I->isSpecial)
+ continue;
+ if (I->Dep->isScheduled) {
+ CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
+ AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
+ DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
+ }
+ }
+ for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
+ SUnit *Succ = DelDeps[i].first;
+ bool isCtrl = DelDeps[i].second;
+ RemovePred(Succ, SU, isCtrl, false);
+ }
+
+ AddPred(CopyFromSU, SU, false, false, Reg, -1);
+ AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);
+
+ AvailableQueue->updateNode(SU);
+ AvailableQueue->addNode(CopyFromSU);
+ AvailableQueue->addNode(CopyToSU);
+ Copies.push_back(CopyFromSU);
+ Copies.push_back(CopyToSU);
+
+ ++NumCCCopies;
+}
+
+/// getPhysicalRegisterVT - Returns the ValueType of the physical register
+/// definition of the specified node.
+/// FIXME: Move to SelectionDAG?
+static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
+ const TargetInstrInfo *TII) {
+ const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
+ assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
+ unsigned NumRes = TID.getNumDefs();
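+  // The node's result values are assumed to list the explicit defs first,
+  // followed by the implicit defs in TargetInstrDesc order; count forward
+  // to find the value index corresponding to Reg.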
+ for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
+ if (Reg == *ImpDef)
+ break;
+ ++NumRes;
+ }
+ return N->getValueType(NumRes);
+}
+
+/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
+/// scheduling of the given node to satisfy live physical register
+/// dependencies. If the specified node is the last one available to
+/// schedule, do whatever is necessary (i.e. backtracking or cloning) to
+/// make scheduling it possible.
+bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
+                                                 SmallVector<unsigned, 4> &LRegs) {
+ if (LiveRegs.empty())
+ return false;
+
+ SmallSet<unsigned, 4> RegAdded;
+ // If this node would clobber any "live" register, then it's not ready.
+ for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
+ I != E; ++I) {
+ if (I->Cost < 0) {
+ unsigned Reg = I->Reg;
+ if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
+ if (RegAdded.insert(Reg))
+ LRegs.push_back(Reg);
+ }
+ for (const unsigned *Alias = TRI->getAliasSet(Reg);
+ *Alias; ++Alias)
+ if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
+ if (RegAdded.insert(*Alias))
+ LRegs.push_back(*Alias);
+ }
+ }
+ }
+
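+  // Implicit defs of SU (and of any nodes flagged to it) can also clobber
+  // live registers; check them and their aliases as well.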
+ for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
+ SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
+ if (!Node || !Node->isTargetOpcode())
+ continue;
+ const TargetInstrDesc &TID = TII->get(Node->getTargetOpcode());
+ if (!TID.ImplicitDefs)
+ continue;
+ for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
+ if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
+ if (RegAdded.insert(*Reg))
+ LRegs.push_back(*Reg);
+ }
+ for (const unsigned *Alias = TRI->getAliasSet(*Reg);
+ *Alias; ++Alias)
+ if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
+ if (RegAdded.insert(*Alias))
+ LRegs.push_back(*Alias);
+ }
+ }
+ }
+ return !LRegs.empty();