1 //===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
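//
// As a rough sketch (bottom-up case; the real loop below in
// ListScheduleBottomUp also handles cycle bounds, live physical register
// checks, backtracking and noop insertion):
//
//   push the graph root onto AvailableQueue;
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = AvailableQueue->pop();     // highest-priority candidate
//     if (SU cannot legally be scheduled yet)
//       set it aside and try the next candidate;
//     else
//       ScheduleNodeBottomUp(SU, CurCycle);  // emit it and release its preds
//   }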
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "pre-RA-sched"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/SchedulerRegistry.h"
21 #include "llvm/Target/TargetRegisterInfo.h"
22 #include "llvm/Target/TargetData.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Target/TargetInstrInfo.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/ADT/BitVector.h"
28 #include "llvm/ADT/PriorityQueue.h"
29 #include "llvm/ADT/SmallPtrSet.h"
30 #include "llvm/ADT/SmallSet.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/Support/CommandLine.h"
37 STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
38 STATISTIC(NumUnfolds, "Number of nodes unfolded");
39 STATISTIC(NumDups, "Number of duplicated nodes");
40 STATISTIC(NumCCCopies, "Number of cross class copies");
42 static RegisterScheduler
43 burrListDAGScheduler("list-burr",
44 "Bottom-up register reduction list scheduling",
45 createBURRListDAGScheduler);
46 static RegisterScheduler
47 tdrListrDAGScheduler("list-tdrr",
48 "Top-down register reduction list scheduling",
49 createTDRRListDAGScheduler);
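// These registrations make the two schedulers selectable by name through the
// SchedulerRegistry; they are normally chosen on the llc command line (e.g.
// with something like -pre-RA-sched=list-burr), though the exact option
// handling lives in the registry code, not in this file.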
52 //===----------------------------------------------------------------------===//
53 /// ScheduleDAGRRList - The actual register reduction list scheduler
54 /// implementation. This supports both top-down and bottom-up scheduling.
56 class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
58 /// isBottomUp - This is true if the scheduling problem is bottom-up, false if it is top-down.
62 /// Fast - True if we are performing fast scheduling.
66 /// AvailableQueue - The priority queue to use for the available SUnits.
67 SchedulingPriorityQueue *AvailableQueue;
69 /// LiveRegDefs - A set of physical registers and their definitions
70 /// that are "live". These nodes must be scheduled before any other nodes that
71 /// modify the registers can be scheduled.
73 std::vector<SUnit*> LiveRegDefs;
74 std::vector<unsigned> LiveRegCycles;
77 ScheduleDAGRRList(SelectionDAG *dag, MachineBasicBlock *bb,
78 const TargetMachine &tm, bool isbottomup, bool f,
79 SchedulingPriorityQueue *availqueue)
80 : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
81 AvailableQueue(availqueue) {
84 ~ScheduleDAGRRList() {
85 delete AvailableQueue;
90 /// IsReachable - Checks if SU is reachable from TargetSU.
91 bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
93 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will create a cycle.
95 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
97 /// AddPred - This adds the specified node X as a predecessor of
98 /// the current node Y if it is not one already.
99 /// Returns true if X is a new predecessor.
100 /// Updates the topological ordering if required.
101 bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
102 unsigned PhyReg = 0, int Cost = 1);
104 /// RemovePred - This removes the specified node N from the predecessors of
105 /// the current node M. Updates the topological ordering if required.
106 bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);
109 void ReleasePred(SUnit *SU, SUnit *PredSU, bool isChain);
110 void ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain);
111 void CapturePred(SUnit*, SUnit*, bool);
112 void ScheduleNodeBottomUp(SUnit*, unsigned);
113 void ScheduleNodeTopDown(SUnit*, unsigned);
114 void UnscheduleNodeBottomUp(SUnit*);
115 void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
116 SUnit *CopyAndMoveSuccessors(SUnit*);
117 void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
118 const TargetRegisterClass*,
119 const TargetRegisterClass*,
120 SmallVector<SUnit*, 2>&);
121 bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
122 void ListScheduleTopDown();
123 void ListScheduleBottomUp();
124 void CommuteNodesToReducePressure();
127 /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
128 /// Updates the topological ordering if required.
129 SUnit *CreateNewSUnit(SDNode *N) {
130 SUnit *NewNode = NewSUnit(N);
131 // Update the topological ordering.
132 if (NewNode->NodeNum >= Node2Index.size())
133 InitDAGTopologicalSorting();
137 /// CreateClone - Creates a new SUnit from an existing one.
138 /// Updates the topological ordering if required.
139 SUnit *CreateClone(SUnit *N) {
140 SUnit *NewNode = Clone(N);
141 // Update the topological ordering.
142 if (NewNode->NodeNum >= Node2Index.size())
143 InitDAGTopologicalSorting();
147 /// Functions for preserving the topological ordering
148 /// even after dynamic insertions of new edges.
149 /// This allows a very fast implementation of IsReachable.
151 /// InitDAGTopologicalSorting - create the initial topological
152 /// ordering from the DAG to be scheduled.
153 void InitDAGTopologicalSorting();
155 /// DFS - make a DFS traversal and mark all nodes affected by the
156 /// edge insertion. These nodes will later get new topological indexes
157 /// by means of the Shift method.
158 void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
160 /// Shift - reassign topological indexes for the nodes in the DAG
161 /// to preserve the topological ordering.
162 void Shift(BitVector& Visited, int LowerBound, int UpperBound);
164 /// Allocate - assign the topological index to the node n.
165 void Allocate(int n, int index);
167 /// Index2Node - Maps topological index to the node number.
168 std::vector<int> Index2Node;
169 /// Node2Index - Maps the node number to its topological index.
170 std::vector<int> Node2Index;
171 /// Visited - a set of nodes visited during a DFS traversal.
174 } // end anonymous namespace
177 /// Schedule - Schedule the DAG using list scheduling.
178 void ScheduleDAGRRList::Schedule() {
179 DOUT << "********** List Scheduling **********\n";
182 LiveRegDefs.resize(TRI->getNumRegs(), NULL);
183 LiveRegCycles.resize(TRI->getNumRegs(), 0);
185 // Build scheduling units.
188 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
189 SUnits[su].dumpAll(this));
194 InitDAGTopologicalSorting();
196 AvailableQueue->initNodes(SUnits);
198 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
200 ListScheduleBottomUp();
202 ListScheduleTopDown();
204 AvailableQueue->releaseState();
207 CommuteNodesToReducePressure();
210 /// CommuteNodesToReducePressure - If a node is two-address and commutable, and
211 /// it is not the last use of its first operand, add it to the CommuteSet if
212 /// possible. It will be commuted when it is translated to an MI.
213 void ScheduleDAGRRList::CommuteNodesToReducePressure() {
214 SmallPtrSet<SUnit*, 4> OperandSeen;
215 for (unsigned i = Sequence.size(); i != 0; ) {
217 SUnit *SU = Sequence[i];
218 if (!SU || !SU->getNode()) continue;
219 if (SU->isCommutable) {
220 unsigned Opc = SU->getNode()->getMachineOpcode();
221 const TargetInstrDesc &TID = TII->get(Opc);
222 unsigned NumRes = TID.getNumDefs();
223 unsigned NumOps = TID.getNumOperands() - NumRes;
224 for (unsigned j = 0; j != NumOps; ++j) {
225 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
228 SDNode *OpN = SU->getNode()->getOperand(j).getNode();
229 SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
230 if (OpSU && OperandSeen.count(OpSU) == 1) {
231 // Ok, so SU is not the last use of OpSU, but SU is two-address so
232 // it will clobber OpSU. Try to commute SU if no other source operand is also not at its last use.
234 bool DoCommute = true;
235 for (unsigned k = 0; k < NumOps; ++k) {
237 OpN = SU->getNode()->getOperand(k).getNode();
238 OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
239 if (OpSU && OperandSeen.count(OpSU) == 1) {
246 CommuteSet.insert(SU->getNode());
249 // Only look at the first use&def node for now.
254 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
257 OperandSeen.insert(I->Dep->OrigNode);
262 //===----------------------------------------------------------------------===//
263 // Bottom-Up Scheduling
264 //===----------------------------------------------------------------------===//
266 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
267 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
268 void ScheduleDAGRRList::ReleasePred(SUnit *SU, SUnit *PredSU, bool isChain) {
269 --PredSU->NumSuccsLeft;
272 if (PredSU->NumSuccsLeft < 0) {
273 cerr << "*** Scheduling failed! ***\n";
275 cerr << " has been released too many times!\n";
280 // Compute how many cycles it will be before this actually becomes
281 // available. This is the max of the start time of all predecessors plus their latencies.
283 // If this is a token edge, we don't need to wait for the latency of the
284 // preceding instruction (e.g. a long-latency load) unless there is also
285 // some other data dependence.
286 unsigned PredDoneCycle = SU->Cycle;
288 PredDoneCycle += PredSU->Latency;
289 else if (SU->Latency)
291 PredSU->CycleBound = std::max(PredSU->CycleBound, PredDoneCycle);
293 if (PredSU->NumSuccsLeft == 0) {
294 PredSU->isAvailable = true;
295 AvailableQueue->push(PredSU);
299 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
300 /// count of its predecessors. If a predecessor pending count is zero, add it to
301 /// the Available queue.
302 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
303 DOUT << "*** Scheduling [" << CurCycle << "]: ";
304 DEBUG(SU->dump(this));
305 SU->Cycle = CurCycle;
307 AvailableQueue->ScheduledNode(SU);
309 // Bottom up: release predecessors
310 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
312 ReleasePred(SU, I->Dep, I->isCtrl);
314 // This is a physical register dependency and it's impossible or
315 // expensive to copy the register. Make sure nothing that can
316 // clobber the register is scheduled between the predecessor and the use.
318 if (!LiveRegDefs[I->Reg]) {
320 LiveRegDefs[I->Reg] = I->Dep;
321 LiveRegCycles[I->Reg] = CurCycle;
326 // Release all the implicit physical register defs that are live.
327 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
330 if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
331 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
332 assert(LiveRegDefs[I->Reg] == SU &&
333 "Physical register dependency violated?");
335 LiveRegDefs[I->Reg] = NULL;
336 LiveRegCycles[I->Reg] = 0;
341 SU->isScheduled = true;
344 /// CapturePred - This does the opposite of ReleasePred. Since SU is being
345 /// unscheduled, increase the succ left count of its predecessors. Remove
346 /// them from AvailableQueue if necessary.
347 void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
348 unsigned CycleBound = 0;
349 for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
353 CycleBound = std::max(CycleBound,
354 I->Dep->Cycle + PredSU->Latency);
357 if (PredSU->isAvailable) {
358 PredSU->isAvailable = false;
359 if (!PredSU->isPending)
360 AvailableQueue->remove(PredSU);
363 PredSU->CycleBound = CycleBound;
364 ++PredSU->NumSuccsLeft;
367 /// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
368 /// state and its predecessors' states to reflect the change.
369 void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
370 DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
371 DEBUG(SU->dump(this));
373 AvailableQueue->UnscheduledNode(SU);
375 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
377 CapturePred(I->Dep, SU, I->isCtrl);
378 if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
379 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
380 assert(LiveRegDefs[I->Reg] == I->Dep &&
381 "Physical register dependency violated?");
383 LiveRegDefs[I->Reg] = NULL;
384 LiveRegCycles[I->Reg] = 0;
388 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
391 if (!LiveRegDefs[I->Reg]) {
392 LiveRegDefs[I->Reg] = SU;
395 if (I->Dep->Cycle < LiveRegCycles[I->Reg])
396 LiveRegCycles[I->Reg] = I->Dep->Cycle;
401 SU->isScheduled = false;
402 SU->isAvailable = true;
403 AvailableQueue->push(SU);
406 /// IsReachable - Checks if SU is reachable from TargetSU.
407 bool ScheduleDAGRRList::IsReachable(const SUnit *SU, const SUnit *TargetSU) {
408 // If insertion of the edge SU->TargetSU would create a cycle
409 // then there is a path from TargetSU to SU.
410 int UpperBound, LowerBound;
411 LowerBound = Node2Index[TargetSU->NodeNum];
412 UpperBound = Node2Index[SU->NodeNum];
413 bool HasLoop = false;
414 // Is Ord(TargetSU) < Ord(SU) ?
415 if (LowerBound < UpperBound) {
417 // There may be a path from TargetSU to SU. Check for it.
418 DFS(TargetSU, UpperBound, HasLoop);
423 /// Allocate - assign the topological index to the node n.
424 inline void ScheduleDAGRRList::Allocate(int n, int index) {
425 Node2Index[n] = index;
426 Index2Node[index] = n;
429 /// InitDAGTopologicalSorting - create the initial topological
430 /// ordering from the DAG to be scheduled.
432 /// The idea of the algorithm is taken from
433 /// "Online algorithms for managing the topological order of
434 /// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
435 /// This is the MNR algorithm, which was first introduced by
436 /// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
437 /// "Maintaining a topological order under edge insertions".
439 /// Short description of the algorithm:
441 /// Topological ordering, ord, of a DAG maps each node to a topological
442 /// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
444 /// This means that if there is a path from the node X to the node Z,
445 /// then ord(X) < ord(Z).
447 /// This property can be used to check for reachability of nodes:
448 /// if Z is reachable from X, then an insertion of the edge Z->X would create a cycle.
451 /// The algorithm first computes a topological ordering for the DAG by
452 /// initializing the Index2Node and Node2Index arrays and then tries to keep
453 /// the ordering up-to-date after edge insertions by reordering the DAG.
455 /// On insertion of the edge X->Y, the algorithm first marks by calling DFS
456 /// the nodes reachable from Y, and then shifts them using Shift to lie
457 /// immediately after X in Index2Node.
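///
/// As an illustration of why this helps: since ord(X) < ord(Y) holds for every
/// edge X->Y, IsReachable(SU, TargetSU) can answer "no" immediately whenever
/// ord(TargetSU) >= ord(SU); otherwise the DFS started at TargetSU only has to
/// visit nodes whose index is below ord(SU), keeping the search local.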
458 void ScheduleDAGRRList::InitDAGTopologicalSorting() {
459 unsigned DAGSize = SUnits.size();
460 std::vector<SUnit*> WorkList;
461 WorkList.reserve(DAGSize);
463 Index2Node.resize(DAGSize);
464 Node2Index.resize(DAGSize);
466 // Initialize the data structures.
467 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
468 SUnit *SU = &SUnits[i];
469 int NodeNum = SU->NodeNum;
470 unsigned Degree = SU->Succs.size();
471 // Temporarily use the Node2Index array as scratch space for degree counts.
472 Node2Index[NodeNum] = Degree;
474 // Is it a node without dependencies?
476 assert(SU->Succs.empty() && "SUnit should have no successors");
477 // Collect leaf nodes.
478 WorkList.push_back(SU);
483 while (!WorkList.empty()) {
484 SUnit *SU = WorkList.back();
486 Allocate(SU->NodeNum, --Id);
487 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
490 if (!--Node2Index[SU->NodeNum])
491 // If all dependencies of the node are processed already,
492 // then the node can be computed now.
493 WorkList.push_back(SU);
497 Visited.resize(DAGSize);
500 // Check correctness of the ordering
501 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
502 SUnit *SU = &SUnits[i];
503 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
505 assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
506 "Wrong topological sorting");
512 /// AddPred - adds an edge from SUnit X to SUnit Y.
513 /// Updates the topological ordering if required.
514 bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
515 unsigned PhyReg, int Cost) {
516 int UpperBound, LowerBound;
517 LowerBound = Node2Index[Y->NodeNum];
518 UpperBound = Node2Index[X->NodeNum];
519 bool HasLoop = false;
520 // Is Ord(X) < Ord(Y) ?
521 if (LowerBound < UpperBound) {
522 // Update the topological order.
524 DFS(Y, UpperBound, HasLoop);
525 assert(!HasLoop && "Inserted edge creates a loop!");
526 // Recompute topological indexes.
527 Shift(Visited, LowerBound, UpperBound);
529 // Now really insert the edge.
530 return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
533 /// RemovePred - This removes the specified node N from the predecessors of
534 /// the current node M. Updates the topological ordering if required.
535 bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
536 bool isCtrl, bool isSpecial) {
537 // InitDAGTopologicalSorting();
538 return M->removePred(N, isCtrl, isSpecial);
541 /// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
542 /// all nodes affected by the edge insertion. These nodes will later get new
543 /// topological indexes by means of the Shift method.
544 void ScheduleDAGRRList::DFS(const SUnit *SU, int UpperBound, bool& HasLoop) {
545 std::vector<const SUnit*> WorkList;
546 WorkList.reserve(SUnits.size());
548 WorkList.push_back(SU);
549 while (!WorkList.empty()) {
550 SU = WorkList.back();
552 Visited.set(SU->NodeNum);
553 for (int I = SU->Succs.size()-1; I >= 0; --I) {
554 int s = SU->Succs[I].Dep->NodeNum;
555 if (Node2Index[s] == UpperBound) {
559 // Visit successors that have not been visited yet and are in the affected region.
560 if (!Visited.test(s) && Node2Index[s] < UpperBound) {
561 WorkList.push_back(SU->Succs[I].Dep);
567 /// Shift - Renumber the nodes so that the topological ordering is preserved.
569 void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
575 for (i = LowerBound; i <= UpperBound; ++i) {
576 // w is node at topological index i.
577 int w = Index2Node[i];
578 if (Visited.test(w)) {
584 Allocate(w, i - shift);
588 for (unsigned j = 0; j < L.size(); ++j) {
589 Allocate(L[j], i - shift);
595 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will create a cycle.
597 bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
598 if (IsReachable(TargetSU, SU))
600 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
602 if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
607 /// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
608 /// BtCycle in order to schedule a specific node. Every node scheduled later
609 /// than BtCycle is unscheduled; if one of them is a successor of SU, SU is marked unavailable.
610 void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
611 unsigned &CurCycle) {
613 while (CurCycle > BtCycle) {
614 OldSU = Sequence.back();
616 if (SU->isSucc(OldSU))
617 // Don't try to remove SU from AvailableQueue.
618 SU->isAvailable = false;
619 UnscheduleNodeBottomUp(OldSU);
624 if (SU->isSucc(OldSU)) {
625 assert(false && "Something is wrong!");
632 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
633 /// successors to the newly created node.
634 SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
635 if (SU->getNode()->getFlaggedNode())
638 SDNode *N = SU->getNode();
643 bool TryUnfold = false;
644 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
645 MVT VT = N->getValueType(i);
648 else if (VT == MVT::Other)
651 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
652 const SDValue &Op = N->getOperand(i);
653 MVT VT = Op.getNode()->getValueType(Op.getResNo());
659 SmallVector<SDNode*, 2> NewNodes;
660 if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
663 DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
664 assert(NewNodes.size() == 2 && "Expected a load folding node!");
667 SDNode *LoadNode = NewNodes[0];
668 unsigned NumVals = N->getNumValues();
669 unsigned OldNumVals = SU->getNode()->getNumValues();
670 for (unsigned i = 0; i != NumVals; ++i)
671 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
672 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
673 SDValue(LoadNode, 1));
675 // LoadNode may already exist. This can happen when there is another
676 // load from the same location that produces the same type of value,
677 // but with different alignment or volatility.
678 bool isNewLoad = true;
680 if (LoadNode->getNodeId() != -1) {
681 LoadSU = &SUnits[LoadNode->getNodeId()];
684 LoadSU = CreateNewSUnit(LoadNode);
685 LoadNode->setNodeId(LoadSU->NodeNum);
687 LoadSU->Depth = SU->Depth;
688 LoadSU->Height = SU->Height;
689 ComputeLatency(LoadSU);
692 SUnit *NewSU = CreateNewSUnit(N);
693 assert(N->getNodeId() == -1 && "Node already inserted!");
694 N->setNodeId(NewSU->NodeNum);
696 const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
697 for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
698 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
699 NewSU->isTwoAddress = true;
703 if (TID.isCommutable())
704 NewSU->isCommutable = true;
705 // FIXME: Calculate height / depth and propagate the changes?
706 NewSU->Depth = SU->Depth;
707 NewSU->Height = SU->Height;
708 ComputeLatency(NewSU);
710 SUnit *ChainPred = NULL;
711 SmallVector<SDep, 4> ChainSuccs;
712 SmallVector<SDep, 4> LoadPreds;
713 SmallVector<SDep, 4> NodePreds;
714 SmallVector<SDep, 4> NodeSuccs;
715 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
719 else if (I->Dep->getNode() && I->Dep->getNode()->isOperandOf(LoadNode))
720 LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
722 NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
724 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
727 ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
728 I->isCtrl, I->isSpecial));
730 NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
731 I->isCtrl, I->isSpecial));
735 RemovePred(SU, ChainPred, true, false);
737 AddPred(LoadSU, ChainPred, true, false);
739 for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
740 SDep *Pred = &LoadPreds[i];
741 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
743 AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
744 Pred->Reg, Pred->Cost);
747 for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
748 SDep *Pred = &NodePreds[i];
749 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
750 AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
751 Pred->Reg, Pred->Cost);
753 for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
754 SDep *Succ = &NodeSuccs[i];
755 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
756 AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
757 Succ->Reg, Succ->Cost);
759 for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
760 SDep *Succ = &ChainSuccs[i];
761 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
763 AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
764 Succ->Reg, Succ->Cost);
768 AddPred(NewSU, LoadSU, false, false);
772 AvailableQueue->addNode(LoadSU);
773 AvailableQueue->addNode(NewSU);
777 if (NewSU->NumSuccsLeft == 0) {
778 NewSU->isAvailable = true;
784 DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
785 NewSU = CreateClone(SU);
787 // New SUnit has the exact same predecessors.
788 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
791 AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
792 NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
795 // Only copy scheduled successors. Cut them from the old node's successor
796 // list and move them over.
797 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
798 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
802 if (I->Dep->isScheduled) {
803 NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
804 AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
805 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
808 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
809 SUnit *Succ = DelDeps[i].first;
810 bool isCtrl = DelDeps[i].second;
811 RemovePred(Succ, SU, isCtrl, false);
814 AvailableQueue->updateNode(SU);
815 AvailableQueue->addNode(NewSU);
821 /// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
822 /// and move all scheduled successors of the given SUnit to the last copy.
823 void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
824 const TargetRegisterClass *DestRC,
825 const TargetRegisterClass *SrcRC,
826 SmallVector<SUnit*, 2> &Copies) {
827 SUnit *CopyFromSU = CreateNewSUnit(NULL);
828 CopyFromSU->CopySrcRC = SrcRC;
829 CopyFromSU->CopyDstRC = DestRC;
830 CopyFromSU->Depth = SU->Depth;
831 CopyFromSU->Height = SU->Height;
833 SUnit *CopyToSU = CreateNewSUnit(NULL);
834 CopyToSU->CopySrcRC = DestRC;
835 CopyToSU->CopyDstRC = SrcRC;
837 // Only copy scheduled successors. Cut them from the old node's successor
838 // list and move them over.
839 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
840 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
844 if (I->Dep->isScheduled) {
845 CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
846 AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
847 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
850 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
851 SUnit *Succ = DelDeps[i].first;
852 bool isCtrl = DelDeps[i].second;
853 RemovePred(Succ, SU, isCtrl, false);
856 AddPred(CopyFromSU, SU, false, false, Reg, -1);
857 AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);
859 AvailableQueue->updateNode(SU);
860 AvailableQueue->addNode(CopyFromSU);
861 AvailableQueue->addNode(CopyToSU);
862 Copies.push_back(CopyFromSU);
863 Copies.push_back(CopyToSU);
868 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
869 /// definition of the specified node.
870 /// FIXME: Move to SelectionDAG?
871 static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
872 const TargetInstrInfo *TII) {
873 const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
874 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
875 unsigned NumRes = TID.getNumDefs();
876 for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
881 return N->getValueType(NumRes);
884 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
885 /// scheduling of the given node to satisfy live physical register dependencies.
886 /// If the specific node is the last one that's available to schedule, do
887 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
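/// A typical situation (illustrative only): SU implicitly defines a physical
/// register such as a condition-code register, while another not-yet-scheduled
/// node still depends on the currently "live" definition of that register;
/// scheduling SU now would clobber the value that node still needs.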
888 bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
889 SmallVector<unsigned, 4> &LRegs){
890 if (NumLiveRegs == 0)
893 SmallSet<unsigned, 4> RegAdded;
894 // If this node would clobber any "live" register, then it's not ready.
895 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
898 unsigned Reg = I->Reg;
899 if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != I->Dep) {
900 if (RegAdded.insert(Reg))
901 LRegs.push_back(Reg);
903 for (const unsigned *Alias = TRI->getAliasSet(Reg);
905 if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != I->Dep) {
906 if (RegAdded.insert(*Alias))
907 LRegs.push_back(*Alias);
912 for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
913 if (!Node->isMachineOpcode())
915 const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
916 if (!TID.ImplicitDefs)
918 for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
919 if (LiveRegDefs[*Reg] && LiveRegDefs[*Reg] != SU) {
920 if (RegAdded.insert(*Reg))
921 LRegs.push_back(*Reg);
923 for (const unsigned *Alias = TRI->getAliasSet(*Reg);
925 if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
926 if (RegAdded.insert(*Alias))
927 LRegs.push_back(*Alias);
931 return !LRegs.empty();
935 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up schedulers.
937 void ScheduleDAGRRList::ListScheduleBottomUp() {
938 unsigned CurCycle = 0;
939 // Add root to Available queue.
940 if (!SUnits.empty()) {
941 SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
942 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
943 RootSU->isAvailable = true;
944 AvailableQueue->push(RootSU);
947 // While the Available queue is not empty, grab the node with the highest
948 // priority. If it is not ready, put it back; otherwise schedule the node.
949 SmallVector<SUnit*, 4> NotReady;
950 DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
951 Sequence.reserve(SUnits.size());
952 while (!AvailableQueue->empty()) {
953 bool Delayed = false;
955 SUnit *CurSU = AvailableQueue->pop();
957 if (CurSU->CycleBound <= CurCycle) {
958 SmallVector<unsigned, 4> LRegs;
959 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
962 LRegsMap.insert(std::make_pair(CurSU, LRegs));
965 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
966 NotReady.push_back(CurSU);
967 CurSU = AvailableQueue->pop();
970 // All candidates are delayed due to live physical reg dependencies.
971 // Try backtracking, code duplication, or inserting cross class copies
973 if (Delayed && !CurSU) {
974 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
975 SUnit *TrySU = NotReady[i];
976 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
978 // Try unscheduling up to the point where it's safe to schedule the current node.
980 unsigned LiveCycle = CurCycle;
981 for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
982 unsigned Reg = LRegs[j];
983 unsigned LCycle = LiveRegCycles[Reg];
984 LiveCycle = std::min(LiveCycle, LCycle);
986 SUnit *OldSU = Sequence[LiveCycle];
987 if (!WillCreateCycle(TrySU, OldSU)) {
988 BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
989 // Force the current node to be scheduled before the node that
990 // requires the physical reg dep.
991 if (OldSU->isAvailable) {
992 OldSU->isAvailable = false;
993 AvailableQueue->remove(OldSU);
995 AddPred(TrySU, OldSU, true, true);
996 // If one or more successors have been unscheduled, then the current
997 // node is no longer available. Schedule a successor that's now
998 // available instead.
999 if (!TrySU->isAvailable)
1000 CurSU = AvailableQueue->pop();
1003 TrySU->isPending = false;
1004 NotReady.erase(NotReady.begin()+i);
1011 // Can't backtrack. Try duplicating the nodes that produce these
1012 // "expensive to copy" values to break the dependency. In case even
1013 // that doesn't work, insert cross class copies.
1014 SUnit *TrySU = NotReady[0];
1015 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1016 assert(LRegs.size() == 1 && "Can't handle this yet!");
1017 unsigned Reg = LRegs[0];
1018 SUnit *LRDef = LiveRegDefs[Reg];
1019 SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
1021 // Issue expensive cross register class copies.
1022 MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
1023 const TargetRegisterClass *RC =
1024 TRI->getPhysicalRegisterRegClass(Reg, VT);
1025 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1027 assert(false && "Don't know how to copy this physical register!");
1030 SmallVector<SUnit*, 2> Copies;
1031 InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1032 DOUT << "Adding an edge from SU # " << TrySU->NodeNum
1033 << " to SU #" << Copies.front()->NodeNum << "\n";
1034 AddPred(TrySU, Copies.front(), true, true);
1035 NewDef = Copies.back();
1038 DOUT << "Adding an edge from SU # " << NewDef->NodeNum
1039 << " to SU #" << TrySU->NodeNum << "\n";
1040 LiveRegDefs[Reg] = NewDef;
1041 AddPred(NewDef, TrySU, true, true);
1042 TrySU->isAvailable = false;
1047 assert(false && "Unable to resolve live physical register dependencies!");
1052 // Add the nodes that aren't ready back onto the available list.
1053 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
1054 NotReady[i]->isPending = false;
1055 // May no longer be available due to backtracking.
1056 if (NotReady[i]->isAvailable)
1057 AvailableQueue->push(NotReady[i]);
1062 Sequence.push_back(0);
1064 ScheduleNodeBottomUp(CurSU, CurCycle);
1065 Sequence.push_back(CurSU);
1070 // Reverse the order if it is bottom up.
1071 std::reverse(Sequence.begin(), Sequence.end());
1075 // Verify that all SUnits were scheduled.
1076 bool AnyNotSched = false;
1077 unsigned DeadNodes = 0;
1079 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1080 if (!SUnits[i].isScheduled) {
1081 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1086 cerr << "*** List scheduling failed! ***\n";
1087 SUnits[i].dump(this);
1088 cerr << "has not been scheduled!\n";
1091 if (SUnits[i].NumSuccsLeft != 0) {
1093 cerr << "*** List scheduling failed! ***\n";
1094 SUnits[i].dump(this);
1095 cerr << "has successors left!\n";
1099 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1102 assert(!AnyNotSched);
1103 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1104 "The number of nodes scheduled doesn't match the expected number!");
1108 //===----------------------------------------------------------------------===//
1109 // Top-Down Scheduling
1110 //===----------------------------------------------------------------------===//
1112 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
1113 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
1114 void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, SUnit *SuccSU, bool isChain) {
1115 --SuccSU->NumPredsLeft;
1118 if (SuccSU->NumPredsLeft < 0) {
1119 cerr << "*** Scheduling failed! ***\n";
1121 cerr << " has been released too many times!\n";
1126 // Compute how many cycles it will be before this actually becomes
1127 // available. This is the max of the start time of all predecessors plus their latencies.
1129 // If this is a token edge, we don't need to wait for the latency of the
1130 // preceding instruction (e.g. a long-latency load) unless there is also
1131 // some other data dependence.
1132 unsigned PredDoneCycle = SU->Cycle;
1134 PredDoneCycle += SU->Latency;
1135 else if (SU->Latency)
1137 SuccSU->CycleBound = std::max(SuccSU->CycleBound, PredDoneCycle);
1139 if (SuccSU->NumPredsLeft == 0) {
1140 SuccSU->isAvailable = true;
1141 AvailableQueue->push(SuccSU);
1146 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1147 /// count of its successors. If a successor pending count is zero, add it to
1148 /// the Available queue.
1149 void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
1150 DOUT << "*** Scheduling [" << CurCycle << "]: ";
1151 DEBUG(SU->dump(this));
1153 SU->Cycle = CurCycle;
1154 Sequence.push_back(SU);
1156 // Top down: release successors
1157 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1159 ReleaseSucc(SU, I->Dep, I->isCtrl);
1161 SU->isScheduled = true;
1162 AvailableQueue->ScheduledNode(SU);
1165 /// ListScheduleTopDown - The main loop of list scheduling for top-down schedulers.
1167 void ScheduleDAGRRList::ListScheduleTopDown() {
1168 unsigned CurCycle = 0;
1170 // Add all leaves to the Available queue.
1171 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1172 // It is available if it has no predecessors.
1173 if (SUnits[i].Preds.empty()) {
1174 AvailableQueue->push(&SUnits[i]);
1175 SUnits[i].isAvailable = true;
1179 // While the Available queue is not empty, grab the node with the highest
1180 // priority. If it is not ready, put it back; otherwise schedule the node.
1181 std::vector<SUnit*> NotReady;
1182 Sequence.reserve(SUnits.size());
1183 while (!AvailableQueue->empty()) {
1184 SUnit *CurSU = AvailableQueue->pop();
1185 while (CurSU && CurSU->CycleBound > CurCycle) {
1186 NotReady.push_back(CurSU);
1187 CurSU = AvailableQueue->pop();
1190 // Add the nodes that aren't ready back onto the available list.
1191 AvailableQueue->push_all(NotReady);
1195 Sequence.push_back(0);
1197 ScheduleNodeTopDown(CurSU, CurCycle);
1204 // Verify that all SUnits were scheduled.
1205 bool AnyNotSched = false;
1206 unsigned DeadNodes = 0;
1208 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1209 if (!SUnits[i].isScheduled) {
1210 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1215 cerr << "*** List scheduling failed! ***\n";
1216 SUnits[i].dump(this);
1217 cerr << "has not been scheduled!\n";
1220 if (SUnits[i].NumPredsLeft != 0) {
1222 cerr << "*** List scheduling failed! ***\n";
1223 SUnits[i].dump(this);
1224 cerr << "has predecessors left!\n";
1228 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1231 assert(!AnyNotSched);
1232 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1233 "The number of nodes scheduled doesn't match the expected number!");
1239 //===----------------------------------------------------------------------===//
1240 // RegReductionPriorityQueue Implementation
1241 //===----------------------------------------------------------------------===//
1243 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1244 // to reduce register pressure.
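//
// Informally, a node's Sethi-Ullman number approximates how many registers are
// needed to evaluate the expression tree rooted at it. A small worked example
// using the bottom-up formula in CalcNodeBUSethiUllmanNumber below: for
// (a + b) + (c + d) the leaves get number 1, each inner add gets the maximum
// of its operands (1) plus 1 for the tie, i.e. 2, and the outer add gets 3.
// Evaluating the register-hungrier subtree earlier in program order keeps
// fewer values live at once; the bottom-up queue achieves this by giving
// smaller numbers higher scheduling priority.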
1248 class RegReductionPriorityQueue;
1250 /// Sorting functions for the Available queue.
1251 struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1252 RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
1253 bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
1254 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1256 bool operator()(const SUnit* left, const SUnit* right) const;
1259 struct bu_ls_rr_fast_sort : public std::binary_function<SUnit*, SUnit*, bool>{
1260 RegReductionPriorityQueue<bu_ls_rr_fast_sort> *SPQ;
1261 bu_ls_rr_fast_sort(RegReductionPriorityQueue<bu_ls_rr_fast_sort> *spq)
1263 bu_ls_rr_fast_sort(const bu_ls_rr_fast_sort &RHS) : SPQ(RHS.SPQ) {}
1265 bool operator()(const SUnit* left, const SUnit* right) const;
1268 struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1269 RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
1270 td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
1271 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1273 bool operator()(const SUnit* left, const SUnit* right) const;
1275 } // end anonymous namespace
1277 static inline bool isCopyFromLiveIn(const SUnit *SU) {
1278 SDNode *N = SU->getNode();
1279 return N && N->getOpcode() == ISD::CopyFromReg &&
1280 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
1283 /// CalcNodeBUSethiUllmanNumber - Compute Sethi Ullman number for bottom up
1284 /// scheduling. Smaller number is the higher priority.
1286 CalcNodeBUSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1287 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1288 if (SethiUllmanNumber != 0)
1289 return SethiUllmanNumber;
1292 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1294 if (I->isCtrl) continue; // ignore chain preds
1295 SUnit *PredSU = I->Dep;
1296 unsigned PredSethiUllman = CalcNodeBUSethiUllmanNumber(PredSU, SUNumbers);
1297 if (PredSethiUllman > SethiUllmanNumber) {
1298 SethiUllmanNumber = PredSethiUllman;
1300 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1304 SethiUllmanNumber += Extra;
1306 if (SethiUllmanNumber == 0)
1307 SethiUllmanNumber = 1;
1309 return SethiUllmanNumber;
1312 /// CalcNodeTDSethiUllmanNumber - Compute Sethi Ullman number for top down
1313 /// scheduling. Smaller number is the higher priority.
1315 CalcNodeTDSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1316 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1317 if (SethiUllmanNumber != 0)
1318 return SethiUllmanNumber;
1320 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1321 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1322 SethiUllmanNumber = 0xffff;
1323 else if (SU->NumSuccsLeft == 0)
1324 // If SU does not have a use, i.e. it doesn't produce a value that would
1325 // be consumed (e.g. store), then it terminates a chain of computation.
1326 // Give it a small SethiUllman number so it will be scheduled right before
1327 // its predecessors, so that it doesn't lengthen their live ranges.
1328 SethiUllmanNumber = 0;
1329 else if (SU->NumPredsLeft == 0 &&
1330 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
1331 SethiUllmanNumber = 0xffff;
1334 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1336 if (I->isCtrl) continue; // ignore chain preds
1337 SUnit *PredSU = I->Dep;
1338 unsigned PredSethiUllman = CalcNodeTDSethiUllmanNumber(PredSU, SUNumbers);
1339 if (PredSethiUllman > SethiUllmanNumber) {
1340 SethiUllmanNumber = PredSethiUllman;
1342 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1346 SethiUllmanNumber += Extra;
1349 return SethiUllmanNumber;
1355 class VISIBILITY_HIDDEN RegReductionPriorityQueue
1356 : public SchedulingPriorityQueue {
1357 PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
1358 unsigned currentQueueId;
1361 RegReductionPriorityQueue() :
1362 Queue(SF(this)), currentQueueId(0) {}
1364 virtual void initNodes(std::vector<SUnit> &sunits) = 0;
1366 virtual void addNode(const SUnit *SU) = 0;
1368 virtual void updateNode(const SUnit *SU) = 0;
1370 virtual void releaseState() = 0;
1372 virtual unsigned getNodePriority(const SUnit *SU) const = 0;
1374 unsigned size() const { return Queue.size(); }
1376 bool empty() const { return Queue.empty(); }
1378 void push(SUnit *U) {
1379 assert(!U->NodeQueueId && "Node in the queue already");
1380 U->NodeQueueId = ++currentQueueId;
1384 void push_all(const std::vector<SUnit *> &Nodes) {
1385 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
1390 if (empty()) return NULL;
1391 SUnit *V = Queue.top();
1397 void remove(SUnit *SU) {
1398 assert(!Queue.empty() && "Queue is empty!");
1399 assert(SU->NodeQueueId != 0 && "Not in queue!");
1400 Queue.erase_one(SU);
1401 SU->NodeQueueId = 0;
1405 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
1406 : public RegReductionPriorityQueue<bu_ls_rr_sort> {
1407 // SUnits - The SUnits for the current graph.
1408 std::vector<SUnit> *SUnits;
1410 // SethiUllmanNumbers - The SethiUllman number for each node.
1411 std::vector<unsigned> SethiUllmanNumbers;
1413 const TargetInstrInfo *TII;
1414 const TargetRegisterInfo *TRI;
1415 ScheduleDAGRRList *scheduleDAG;
1418 explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
1419 const TargetRegisterInfo *tri)
1420 : TII(tii), TRI(tri), scheduleDAG(NULL) {}
1422 void initNodes(std::vector<SUnit> &sunits) {
1424 // Add pseudo dependency edges for two-address nodes.
1425 AddPseudoTwoAddrDeps();
1426 // Calculate node priorities.
1427 CalculateSethiUllmanNumbers();
1430 void addNode(const SUnit *SU) {
1431 unsigned SUSize = SethiUllmanNumbers.size();
1432 if (SUnits->size() > SUSize)
1433 SethiUllmanNumbers.resize(SUSize*2, 0);
1434 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1437 void updateNode(const SUnit *SU) {
1438 SethiUllmanNumbers[SU->NodeNum] = 0;
1439 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1442 void releaseState() {
1444 SethiUllmanNumbers.clear();
1447 unsigned getNodePriority(const SUnit *SU) const {
1448 assert(SU->NodeNum < SethiUllmanNumbers.size());
1449 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1450 if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
1451 // CopyFromReg should be close to its def because it restricts
1452 // allocation choices. But if it is a livein then perhaps we want it
1453 // closer to its uses so it can be coalesced.
1455 else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1456 // CopyToReg should be close to its uses to facilitate coalescing and avoid spilling.
1459 else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
1460 Opc == TargetInstrInfo::INSERT_SUBREG)
1461 // EXTRACT_SUBREG / INSERT_SUBREG should be close to their uses to
1462 // facilitate coalescing.
1464 else if (SU->NumSuccs == 0)
1465 // If SU does not have a use, i.e. it doesn't produce a value that would
1466 // be consumed (e.g. store), then it terminates a chain of computation.
1467 // Give it a large SethiUllman number so it will be scheduled right
1468 // before its predecessors, so that it doesn't lengthen their live ranges.
1470 else if (SU->NumPreds == 0)
1471 // If SU does not have a def, schedule it close to its uses because it
1472 // does not lengthen any live ranges.
1475 return SethiUllmanNumbers[SU->NodeNum];
1478 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1479 scheduleDAG = scheduleDag;
1483 bool canClobber(const SUnit *SU, const SUnit *Op);
1484 void AddPseudoTwoAddrDeps();
1485 void CalculateSethiUllmanNumbers();
1489 class VISIBILITY_HIDDEN BURegReductionFastPriorityQueue
1490 : public RegReductionPriorityQueue<bu_ls_rr_fast_sort> {
1491 // SUnits - The SUnits for the current graph.
1492 const std::vector<SUnit> *SUnits;
1494 // SethiUllmanNumbers - The SethiUllman number for each node.
1495 std::vector<unsigned> SethiUllmanNumbers;
1497 explicit BURegReductionFastPriorityQueue() {}
1499 void initNodes(std::vector<SUnit> &sunits) {
1501 // Calculate node priorities.
1502 CalculateSethiUllmanNumbers();
1505 void addNode(const SUnit *SU) {
1506 unsigned SUSize = SethiUllmanNumbers.size();
1507 if (SUnits->size() > SUSize)
1508 SethiUllmanNumbers.resize(SUSize*2, 0);
1509 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1512 void updateNode(const SUnit *SU) {
1513 SethiUllmanNumbers[SU->NodeNum] = 0;
1514 CalcNodeBUSethiUllmanNumber(SU, SethiUllmanNumbers);
1517 void releaseState() {
1519 SethiUllmanNumbers.clear();
1522 unsigned getNodePriority(const SUnit *SU) const {
1523 return SethiUllmanNumbers[SU->NodeNum];
1527 void CalculateSethiUllmanNumbers();
1531 class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
1532 : public RegReductionPriorityQueue<td_ls_rr_sort> {
1533 // SUnits - The SUnits for the current graph.
1534 const std::vector<SUnit> *SUnits;
1536 // SethiUllmanNumbers - The SethiUllman number for each node.
1537 std::vector<unsigned> SethiUllmanNumbers;
1540 TDRegReductionPriorityQueue() {}
1542 void initNodes(std::vector<SUnit> &sunits) {
1544 // Calculate node priorities.
1545 CalculateSethiUllmanNumbers();
1548 void addNode(const SUnit *SU) {
1549 unsigned SUSize = SethiUllmanNumbers.size();
1550 if (SUnits->size() > SUSize)
1551 SethiUllmanNumbers.resize(SUSize*2, 0);
1552 CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
1555 void updateNode(const SUnit *SU) {
1556 SethiUllmanNumbers[SU->NodeNum] = 0;
1557 CalcNodeTDSethiUllmanNumber(SU, SethiUllmanNumbers);
1560 void releaseState() {
1562 SethiUllmanNumbers.clear();
1565 unsigned getNodePriority(const SUnit *SU) const {
1566 assert(SU->NodeNum < SethiUllmanNumbers.size());
1567 return SethiUllmanNumbers[SU->NodeNum];
1571 void CalculateSethiUllmanNumbers();
1575 /// closestSucc - Returns the scheduled cycle of the successor which is
1576 /// closest to the current cycle.
1577 static unsigned closestSucc(const SUnit *SU) {
1578 unsigned MaxCycle = 0;
1579 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1581 unsigned Cycle = I->Dep->Cycle;
1582 // If there are a bunch of CopyToRegs stacked up, they should be considered
1583 // to be at the same position.
1584 if (I->Dep->getNode() && I->Dep->getNode()->getOpcode() == ISD::CopyToReg)
1585 Cycle = closestSucc(I->Dep)+1;
1586 if (Cycle > MaxCycle)
1592 /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
1593 /// for scratch registers. Live-in operands and live-out results don't count
1594 /// since they are "fixed".
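/// Roughly, each operand that is not produced by a CopyFromReg and each result
/// that is not consumed by a CopyToReg is counted as needing a scratch
/// register within the block.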
1595 static unsigned calcMaxScratches(const SUnit *SU) {
1596 unsigned Scratches = 0;
1597 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1599 if (I->isCtrl) continue; // ignore chain preds
1600 if (!I->Dep->getNode() || I->Dep->getNode()->getOpcode() != ISD::CopyFromReg)
1603 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1605 if (I->isCtrl) continue; // ignore chain succs
1606 if (!I->Dep->getNode() || I->Dep->getNode()->getOpcode() != ISD::CopyToReg)
1613 bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1614 unsigned LPriority = SPQ->getNodePriority(left);
1615 unsigned RPriority = SPQ->getNodePriority(right);
1616 if (LPriority != RPriority)
1617 return LPriority > RPriority;
1619 // Try to schedule a def and its use closer together when Sethi-Ullman numbers are the same:
1624 // when two instructions are both ready, prefer the one whose result is used by the most
1628 // recently scheduled instruction (e.g. schedule "t2 = op ..." first when its use "t1 = op t2, ..." was just scheduled).
1635 // This creates more short live intervals.
1636 unsigned LDist = closestSucc(left);
1637 unsigned RDist = closestSucc(right);
1639 return LDist < RDist;
1641 // Intuitively, it's good to push down instructions whose results are
1642 // liveout so their long live ranges won't conflict with other values
1643 // which are needed inside the BB. Further prioritize liveout instructions
1644 // by the number of operands which are calculated within the BB.
1645 unsigned LScratch = calcMaxScratches(left);
1646 unsigned RScratch = calcMaxScratches(right);
1647 if (LScratch != RScratch)
1648 return LScratch > RScratch;
1650 if (left->Height != right->Height)
1651 return left->Height > right->Height;
1653 if (left->Depth != right->Depth)
1654 return left->Depth < right->Depth;
1656 if (left->CycleBound != right->CycleBound)
1657 return left->CycleBound > right->CycleBound;
1659 assert(left->NodeQueueId && right->NodeQueueId &&
1660 "NodeQueueId cannot be zero");
1661 return (left->NodeQueueId > right->NodeQueueId);
1665 bu_ls_rr_fast_sort::operator()(const SUnit *left, const SUnit *right) const {
1666 unsigned LPriority = SPQ->getNodePriority(left);
1667 unsigned RPriority = SPQ->getNodePriority(right);
1668 if (LPriority != RPriority)
1669 return LPriority > RPriority;
1670 assert(left->NodeQueueId && right->NodeQueueId &&
1671 "NodeQueueId cannot be zero");
1672 return (left->NodeQueueId > right->NodeQueueId);
1676 BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
1677 if (SU->isTwoAddress) {
1678 unsigned Opc = SU->getNode()->getMachineOpcode();
1679 const TargetInstrDesc &TID = TII->get(Opc);
1680 unsigned NumRes = TID.getNumDefs();
1681 unsigned NumOps = TID.getNumOperands() - NumRes;
1682 for (unsigned i = 0; i != NumOps; ++i) {
1683 if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
1684 SDNode *DU = SU->getNode()->getOperand(i).getNode();
1685 if (DU->getNodeId() != -1 &&
1686 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
1695 /// hasCopyToRegUse - Return true if SU has a value successor that is a CopyToReg.
1697 static bool hasCopyToRegUse(const SUnit *SU) {
1698 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1700 if (I->isCtrl) continue;
1701 const SUnit *SuccSU = I->Dep;
1702 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
1708 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
1709 /// physical register defs.
1710 static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
1711 const TargetInstrInfo *TII,
1712 const TargetRegisterInfo *TRI) {
1713 SDNode *N = SuccSU->getNode();
1714 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1715 const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
1716 assert(ImpDefs && "Caller should check hasPhysRegDefs");
1717 const unsigned *SUImpDefs =
1718 TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
1721 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
1722 MVT VT = N->getValueType(i);
1723 if (VT == MVT::Flag || VT == MVT::Other)
1725 if (!N->hasAnyUseOfValue(i))
1727 unsigned Reg = ImpDefs[i - NumDefs];
1728 for (;*SUImpDefs; ++SUImpDefs) {
1729 unsigned SUReg = *SUImpDefs;
1730 if (TRI->regsOverlap(Reg, SUReg))
1737 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
1738 /// it as a def&use operand, add a pseudo control edge from it to the other
1739 /// node (if it won't create a cycle) so the two-address one will be scheduled
1740 /// first (lower in the schedule). If both nodes are two-address, favor the
1741 /// one that has a CopyToReg use (more likely to be a loop induction update).
1742 /// If both are two-address, but one is commutable while the other is not
1743 /// commutable, favor the one that's not commutable.
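///
/// Illustrative example (names are made up): if SU is a two-address node
/// "t3 = add t1, t2" whose result is constrained to reuse t1's register, and
/// another node U also reads t1, the added edge makes U a predecessor of SU,
/// so U is placed before the add in the final order and the add cannot
/// clobber t1 while U still needs it.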
1744 void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
1745 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1746 SUnit *SU = &(*SUnits)[i];
1747 if (!SU->isTwoAddress)
1750 SDNode *Node = SU->getNode();
1751 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getFlaggedNode())
1754 unsigned Opc = Node->getMachineOpcode();
1755 const TargetInstrDesc &TID = TII->get(Opc);
1756 unsigned NumRes = TID.getNumDefs();
1757 unsigned NumOps = TID.getNumOperands() - NumRes;
1758 for (unsigned j = 0; j != NumOps; ++j) {
1759 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
1760 SDNode *DU = SU->getNode()->getOperand(j).getNode();
1761 if (DU->getNodeId() == -1)
1763 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
1764 if (!DUSU) continue;
1765 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
1766 E = DUSU->Succs.end(); I != E; ++I) {
1767 if (I->isCtrl) continue;
1768 SUnit *SuccSU = I->Dep;
1771 // Be conservative. Ignore if nodes aren't at roughly the same
1772 // depth and height.
1773 if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
1775 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
1777 // Don't constrain nodes with physical register defs if the
1778 // predecessor can clobber them.
1779 if (SuccSU->hasPhysRegDefs) {
1780 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
1783 // Don't constrain extract_subreg / insert_subreg; these may be
1784 // coalesced away. We don't want them close to their uses.
1785 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
1786 if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
1787 SuccOpc == TargetInstrInfo::INSERT_SUBREG)
1789 if ((!canClobber(SuccSU, DUSU) ||
1790 (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
1791 (!SU->isCommutable && SuccSU->isCommutable)) &&
1792 !scheduleDAG->IsReachable(SuccSU, SU)) {
1793 DOUT << "Adding an edge from SU # " << SU->NodeNum
1794 << " to SU #" << SuccSU->NodeNum << "\n";
1795 scheduleDAG->AddPred(SU, SuccSU, true, true);
1803 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1804 /// scheduling units.
1805 void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
1806 SethiUllmanNumbers.assign(SUnits->size(), 0);
1808 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1809 CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1811 void BURegReductionFastPriorityQueue::CalculateSethiUllmanNumbers() {
1812 SethiUllmanNumbers.assign(SUnits->size(), 0);
1814 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1815 CalcNodeBUSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1818 /// LimitedSumOfUnscheduledPredsOfSuccs - Count the number of unscheduled
1819 /// predecessors of the successors of the SUnit SU. Stop when the provided
1820 /// limit is exceeded.
1821 static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
1824 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1826 const SUnit *SuccSU = I->Dep;
1827 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1828 EE = SuccSU->Preds.end(); II != EE; ++II) {
1829 SUnit *PredSU = II->Dep;
1830 if (!PredSU->isScheduled)
1840 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1841 unsigned LPriority = SPQ->getNodePriority(left);
1842 unsigned RPriority = SPQ->getNodePriority(right);
1843 bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
1844 bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
1845 bool LIsFloater = LIsTarget && left->NumPreds == 0;
1846 bool RIsFloater = RIsTarget && right->NumPreds == 0;
1847 unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
1848 unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
1850 if (left->NumSuccs == 0 && right->NumSuccs != 0)
1852 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
1859 if (left->NumSuccs == 1)
1861 if (right->NumSuccs == 1)
1864 if (LPriority+LBonus != RPriority+RBonus)
1865 return LPriority+LBonus < RPriority+RBonus;
1867 if (left->Depth != right->Depth)
1868 return left->Depth < right->Depth;
1870 if (left->NumSuccsLeft != right->NumSuccsLeft)
1871 return left->NumSuccsLeft > right->NumSuccsLeft;
1873 if (left->CycleBound != right->CycleBound)
1874 return left->CycleBound > right->CycleBound;
1876 assert(left->NodeQueueId && right->NodeQueueId &&
1877 "NodeQueueId cannot be zero");
1878 return (left->NodeQueueId > right->NodeQueueId);
1881 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1882 /// scheduling units.
1883 void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
1884 SethiUllmanNumbers.assign(SUnits->size(), 0);
1886 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1887 CalcNodeTDSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1890 //===----------------------------------------------------------------------===//
1891 // Public Constructor Functions
1892 //===----------------------------------------------------------------------===//
1894 llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1896 const TargetMachine *TM,
1897 MachineBasicBlock *BB,
1900 return new ScheduleDAGRRList(DAG, BB, *TM, true, true,
1901 new BURegReductionFastPriorityQueue());
1903 const TargetInstrInfo *TII = TM->getInstrInfo();
1904 const TargetRegisterInfo *TRI = TM->getRegisterInfo();
1906 BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);
1908 ScheduleDAGRRList *SD =
1909 new ScheduleDAGRRList(DAG, BB, *TM, true, false, PQ);
1910 PQ->setScheduleDAG(SD);
1914 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1916 const TargetMachine *TM,
1917 MachineBasicBlock *BB,
1919 return new ScheduleDAGRRList(DAG, BB, *TM, false, Fast,
1920 new TDRegReductionPriorityQueue());