1 //===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
16 //===----------------------------------------------------------------------===//
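//
// Illustrative sketch (hypothetical helper names, not part of this file): the
// scheduling loop described above is, in essence,
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = AvailableQueue->pop();      // highest-priority ready node
//     if (!isLegalToScheduleNow(SU)) {        // e.g. its cycle bound not reached
//       NotReady.push_back(SU);
//       continue;
//     }
//     ScheduleNode(SU);                       // emit; release preds (bottom-up)
//   }                                         // or succs (top-down)
//
// where isLegalToScheduleNow and ScheduleNode stand in for the checks and
// bookkeeping that ListScheduleBottomUp / ListScheduleTopDown implement below.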
18 #define DEBUG_TYPE "pre-RA-sched"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/SchedulerRegistry.h"
21 #include "llvm/Target/TargetRegisterInfo.h"
22 #include "llvm/Target/TargetData.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Target/TargetInstrInfo.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/ADT/BitVector.h"
28 #include "llvm/ADT/PriorityQueue.h"
29 #include "llvm/ADT/SmallPtrSet.h"
30 #include "llvm/ADT/SmallSet.h"
31 #include "llvm/ADT/Statistic.h"
32 #include "llvm/ADT/STLExtras.h"
34 #include "llvm/Support/CommandLine.h"
37 STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
38 STATISTIC(NumUnfolds, "Number of nodes unfolded");
39 STATISTIC(NumDups, "Number of duplicated nodes");
40 STATISTIC(NumCCCopies, "Number of cross class copies");
42 static RegisterScheduler
43 burrListDAGScheduler("list-burr",
44 " Bottom-up register reduction list scheduling",
45 createBURRListDAGScheduler);
46 static RegisterScheduler
47 tdrListrDAGScheduler("list-tdrr",
48 " Top-down register reduction list scheduling",
49 createTDRRListDAGScheduler);
52 //===----------------------------------------------------------------------===//
53 /// ScheduleDAGRRList - The actual register reduction list scheduler
54 /// implementation. This supports both top-down and bottom-up scheduling.
56 class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
58 /// isBottomUp - This is true if the scheduling problem is bottom-up, false if it is top-down.
64 /// AvailableQueue - The priority queue to use for the available SUnits.
65 SchedulingPriorityQueue *AvailableQueue;
67 /// LiveRegs / LiveRegDefs - A set of physical registers and their definitions
68 /// that are "live". These nodes must be scheduled before any other nodes that
69 /// modify the registers can be scheduled.
70 SmallSet<unsigned, 4> LiveRegs;
71 std::vector<SUnit*> LiveRegDefs;
72 std::vector<unsigned> LiveRegCycles;
75 ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
76 const TargetMachine &tm, bool isbottomup, bool f,
77 SchedulingPriorityQueue *availqueue)
78 : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup), Fast(f),
79 AvailableQueue(availqueue) {
82 ~ScheduleDAGRRList() {
83 delete AvailableQueue;
88 /// IsReachable - Checks if SU is reachable from TargetSU.
89 bool IsReachable(SUnit *SU, SUnit *TargetSU);
91 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will create a cycle.
93 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
95 /// AddPred - This adds the specified node X as a predecessor of
96 /// the current node Y if it is not already a predecessor.
97 /// This returns true if this is a new predecessor.
98 /// Updates the topological ordering if required.
99 bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
100 unsigned PhyReg = 0, int Cost = 1);
102 /// RemovePred - This removes the specified node N from the predecessors of
103 /// the current node M. Updates the topological ordering if required.
104 bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);
107 void ReleasePred(SUnit*, bool, unsigned);
108 void ReleaseSucc(SUnit*, bool isChain, unsigned);
109 void CapturePred(SUnit*, SUnit*, bool);
110 void ScheduleNodeBottomUp(SUnit*, unsigned);
111 void ScheduleNodeTopDown(SUnit*, unsigned);
112 void UnscheduleNodeBottomUp(SUnit*);
113 void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
114 SUnit *CopyAndMoveSuccessors(SUnit*);
115 void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
116 const TargetRegisterClass*,
117 const TargetRegisterClass*,
118 SmallVector<SUnit*, 2>&);
119 bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
120 void ListScheduleTopDown();
121 void ListScheduleBottomUp();
122 void CommuteNodesToReducePressure();
125 /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
126 /// Updates the topological ordering if required.
127 SUnit *CreateNewSUnit(SDNode *N) {
128 SUnit *NewNode = NewSUnit(N);
129 // Update the topological ordering.
130 if (NewNode->NodeNum >= Node2Index.size())
131 InitDAGTopologicalSorting();
135 /// CreateClone - Creates a new SUnit from an existing one.
136 /// Updates the topological ordering if required.
137 SUnit *CreateClone(SUnit *N) {
138 SUnit *NewNode = Clone(N);
139 // Update the topological ordering.
140 if (NewNode->NodeNum >= Node2Index.size())
141 InitDAGTopologicalSorting();
145 /// Functions for preserving the topological ordering
146 /// even after dynamic insertions of new edges.
147 /// This allows a very fast implementation of IsReachable.
149 /// InitDAGTopologicalSorting - create the initial topological
150 /// ordering from the DAG to be scheduled.
151 void InitDAGTopologicalSorting();
153 /// DFS - make a DFS traversal and mark all nodes affected by the
154 /// edge insertion. These nodes will later get new topological indexes
155 /// by means of the Shift method.
156 void DFS(SUnit *SU, int UpperBound, bool& HasLoop);
158 /// Shift - reassign topological indexes for the nodes in the DAG
159 /// to preserve the topological ordering.
160 void Shift(BitVector& Visited, int LowerBound, int UpperBound);
162 /// Allocate - assign the topological index to the node n.
163 void Allocate(int n, int index);
165 /// Index2Node - Maps topological index to the node number.
166 std::vector<int> Index2Node;
167 /// Node2Index - Maps the node number to its topological index.
168 std::vector<int> Node2Index;
169 /// Visited - a set of nodes visited during a DFS traversal.
172 } // end anonymous namespace
175 /// Schedule - Schedule the DAG using list scheduling.
176 void ScheduleDAGRRList::Schedule() {
177 DOUT << "********** List Scheduling **********\n";
179 LiveRegDefs.resize(TRI->getNumRegs(), NULL);
180 LiveRegCycles.resize(TRI->getNumRegs(), 0);
182 // Build scheduling units.
185 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
186 SUnits[su].dumpAll(&DAG));
191 InitDAGTopologicalSorting();
193 AvailableQueue->initNodes(SUnits);
195 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
197 ListScheduleBottomUp();
199 ListScheduleTopDown();
201 AvailableQueue->releaseState();
204 CommuteNodesToReducePressure();
206 DOUT << "*** Final schedule ***\n";
207 DEBUG(dumpSchedule());
210 // Emit in scheduled order
214 /// CommuteNodesToReducePressure - If a node is two-address and commutable, and
215 /// it is not the last use of its first operand, add it to the CommuteSet if
216 /// possible. It will be commuted when it is translated to an MI.
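///
/// Hypothetical example (two-address target instruction, made-up operands):
/// for "t3 = ADD t1, t2", where ADD ties its result to the first operand, if
/// t1 is still used after this node but t2 is not, keeping "ADD t1, t2" forces
/// a copy to preserve t1; commuting it to "ADD t2, t1" lets the dead t2 be the
/// operand that gets clobbered instead.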
217 void ScheduleDAGRRList::CommuteNodesToReducePressure() {
218 SmallPtrSet<SUnit*, 4> OperandSeen;
219 for (unsigned i = Sequence.size(); i != 0; ) {
221 SUnit *SU = Sequence[i];
222 if (!SU || !SU->Node) continue;
223 if (SU->isCommutable) {
224 unsigned Opc = SU->Node->getTargetOpcode();
225 const TargetInstrDesc &TID = TII->get(Opc);
226 unsigned NumRes = TID.getNumDefs();
227 unsigned NumOps = TID.getNumOperands() - NumRes;
228 for (unsigned j = 0; j != NumOps; ++j) {
229 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
232 SDNode *OpN = SU->Node->getOperand(j).Val;
233 SUnit *OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
234 if (OpSU && OperandSeen.count(OpSU) == 1) {
235 // Ok, so SU is not the last use of OpSU, but SU is two-address so it will
236 // clobber OpSU. Try to commute SU if no other source operand has the same problem.
238 bool DoCommute = true;
239 for (unsigned k = 0; k < NumOps; ++k) {
241 OpN = SU->Node->getOperand(k).Val;
242 OpSU = isPassiveNode(OpN) ? NULL : &SUnits[OpN->getNodeId()];
243 if (OpSU && OperandSeen.count(OpSU) == 1) {
250 CommuteSet.insert(SU->Node);
253 // Only look at the first use&def node for now.
258 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
261 OperandSeen.insert(I->Dep->OrigNode);
266 //===----------------------------------------------------------------------===//
267 // Bottom-Up Scheduling
268 //===----------------------------------------------------------------------===//
270 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
271 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
272 void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
274 // FIXME: the distance between two nodes is not always == the predecessor's
275 // latency. For example, the reader can very well read the register written
276 // by the predecessor later than the issue cycle. It also depends on the
277 // interrupt model (drain vs. freeze).
278 PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);
280 --PredSU->NumSuccsLeft;
283 if (PredSU->NumSuccsLeft < 0) {
284 cerr << "*** List scheduling failed! ***\n";
286 cerr << " has been released too many times!\n";
291 if (PredSU->NumSuccsLeft == 0) {
292 PredSU->isAvailable = true;
293 AvailableQueue->push(PredSU);
297 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
298 /// count of its predecessors. If a predecessor pending count is zero, add it to
299 /// the Available queue.
300 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
301 DOUT << "*** Scheduling [" << CurCycle << "]: ";
302 DEBUG(SU->dump(&DAG));
303 SU->Cycle = CurCycle;
305 AvailableQueue->ScheduledNode(SU);
307 // Bottom up: release predecessors
308 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
310 ReleasePred(I->Dep, I->isCtrl, CurCycle);
312 // This is a physical register dependency and it's impossible or
313 // expensive to copy the register. Make sure nothing that can
314 // clobber the register is scheduled between the predecessor and
316 if (LiveRegs.insert(I->Reg)) {
317 LiveRegDefs[I->Reg] = I->Dep;
318 LiveRegCycles[I->Reg] = CurCycle;
323 // Release all the implicit physical register defs that are live.
324 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
327 if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
328 LiveRegs.erase(I->Reg);
329 assert(LiveRegDefs[I->Reg] == SU &&
330 "Physical register dependency violated?");
331 LiveRegDefs[I->Reg] = NULL;
332 LiveRegCycles[I->Reg] = 0;
337 SU->isScheduled = true;
340 /// CapturePred - This does the opposite of ReleasePred. Since SU is being
341 /// unscheduled, increase the NumSuccsLeft count of its predecessors. Remove
342 /// them from AvailableQueue if necessary.
343 void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
344 unsigned CycleBound = 0;
345 for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
349 CycleBound = std::max(CycleBound,
350 I->Dep->Cycle + PredSU->Latency);
353 if (PredSU->isAvailable) {
354 PredSU->isAvailable = false;
355 if (!PredSU->isPending)
356 AvailableQueue->remove(PredSU);
359 PredSU->CycleBound = CycleBound;
360 ++PredSU->NumSuccsLeft;
363 /// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
364 /// state and its predecessors' states to reflect the change.
365 void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
366 DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
367 DEBUG(SU->dump(&DAG));
369 AvailableQueue->UnscheduledNode(SU);
371 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
373 CapturePred(I->Dep, SU, I->isCtrl);
374 if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
375 LiveRegs.erase(I->Reg);
376 assert(LiveRegDefs[I->Reg] == I->Dep &&
377 "Physical register dependency violated?");
378 LiveRegDefs[I->Reg] = NULL;
379 LiveRegCycles[I->Reg] = 0;
383 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
386 if (LiveRegs.insert(I->Reg)) {
387 assert(!LiveRegDefs[I->Reg] &&
388 "Physical register dependency violated?");
389 LiveRegDefs[I->Reg] = SU;
391 if (I->Dep->Cycle < LiveRegCycles[I->Reg])
392 LiveRegCycles[I->Reg] = I->Dep->Cycle;
397 SU->isScheduled = false;
398 SU->isAvailable = true;
399 AvailableQueue->push(SU);
402 /// IsReachable - Checks if SU is reachable from TargetSU.
403 bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
404 // If insertion of the edge SU->TargetSU would create a cycle
405 // then there is a path from TargetSU to SU.
406 int UpperBound, LowerBound;
407 LowerBound = Node2Index[TargetSU->NodeNum];
408 UpperBound = Node2Index[SU->NodeNum];
409 bool HasLoop = false;
410 // Is Ord(TargetSU) < Ord(SU) ?
411 if (LowerBound < UpperBound) {
413 // There may be a path from TargetSU to SU. Check for it.
414 DFS(TargetSU, UpperBound, HasLoop);
419 /// Allocate - assign the topological index to the node n.
420 inline void ScheduleDAGRRList::Allocate(int n, int index) {
421 Node2Index[n] = index;
422 Index2Node[index] = n;
425 /// InitDAGTopologicalSorting - create the initial topological
426 /// ordering from the DAG to be scheduled.
428 /// The idea of the algorithm is taken from
429 /// "Online algorithms for managing the topological order of
430 /// a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly
431 /// This is the MNR algorithm, which was first introduced by
432 /// A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
433 /// "Maintaining a topological order under edge insertions".
435 /// Short description of the algorithm:
437 /// Topological ordering, ord, of a DAG maps each node to a topological
438 /// index so that for all edges X->Y it is the case that ord(X) < ord(Y).
440 /// This means that if there is a path from the node X to the node Z,
441 /// then ord(X) < ord(Z).
443 /// This property can be used to check for reachability of nodes:
444 /// if Z is reachable from X, then an insertion of the edge Z->X would create a cycle.
447 /// The algorithm first computes a topological ordering for the DAG by
448 /// initializing the Index2Node and Node2Index arrays and then tries to keep
449 /// the ordering up-to-date after edge insertions by reordering the DAG.
451 /// On insertion of the edge X->Y, the algorithm first marks, via a DFS, the
452 /// nodes reachable from Y, and then shifts them using Shift so that they lie
453 /// immediately after X in Index2Node.
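///
/// Worked example (illustrative only): with edges A->B and A->C and the
/// ordering ord(A)=0, ord(B)=1, ord(C)=2, inserting the edge C->B violates
/// ord(C) < ord(B). DFS from B marks the nodes reachable from B inside the
/// affected index window (here just B itself), and Shift then renumbers the
/// window to ord(C)=1, ord(B)=2, restoring a valid order without touching the
/// rest of the graph.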
454 void ScheduleDAGRRList::InitDAGTopologicalSorting() {
455 unsigned DAGSize = SUnits.size();
456 std::vector<unsigned> InDegree(DAGSize);
457 std::vector<SUnit*> WorkList;
458 WorkList.reserve(DAGSize);
459 std::vector<SUnit*> TopOrder;
460 TopOrder.reserve(DAGSize);
462 // Initialize the data structures.
463 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
464 SUnit *SU = &SUnits[i];
465 int NodeNum = SU->NodeNum;
466 unsigned Degree = SU->Succs.size();
467 InDegree[NodeNum] = Degree;
469 // Is it a node without dependencies?
471 assert(SU->Succs.empty() && "SUnit should have no successors");
472 // Collect leaf nodes.
473 WorkList.push_back(SU);
477 while (!WorkList.empty()) {
478 SUnit *SU = WorkList.back();
480 TopOrder.push_back(SU);
481 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
484 if (!--InDegree[SU->NodeNum])
485 // If all dependencies of the node are processed already,
486 // then the node can be computed now.
487 WorkList.push_back(SU);
491 // Second pass, assign the actual topological order as node ids.
496 Index2Node.resize(DAGSize);
497 Node2Index.resize(DAGSize);
498 Visited.resize(DAGSize);
500 for (std::vector<SUnit*>::reverse_iterator TI = TopOrder.rbegin(),
501 TE = TopOrder.rend();TI != TE; ++TI) {
502 Allocate((*TI)->NodeNum, Id);
507 // Check correctness of the ordering
508 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
509 SUnit *SU = &SUnits[i];
510 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
512 assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
513 "Wrong topological sorting");
519 /// AddPred - adds an edge from SUnit X to SUnit Y.
520 /// Updates the topological ordering if required.
521 bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
522 unsigned PhyReg, int Cost) {
523 int UpperBound, LowerBound;
524 LowerBound = Node2Index[Y->NodeNum];
525 UpperBound = Node2Index[X->NodeNum];
526 bool HasLoop = false;
527 // Is Ord(X) < Ord(Y) ?
528 if (LowerBound < UpperBound) {
529 // Update the topological order.
531 DFS(Y, UpperBound, HasLoop);
532 assert(!HasLoop && "Inserted edge creates a loop!");
533 // Recompute topological indexes.
534 Shift(Visited, LowerBound, UpperBound);
536 // Now really insert the edge.
537 return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
540 /// RemovePred - This removes the specified node N from the predecessors of
541 /// the current node M. Updates the topological ordering if required.
542 bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
543 bool isCtrl, bool isSpecial) {
544 // InitDAGTopologicalSorting();
545 return M->removePred(N, isCtrl, isSpecial);
548 /// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
549 /// all nodes affected by the edge insertion. These nodes will later get new
550 /// topological indexes by means of the Shift method.
551 void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
552 std::vector<SUnit*> WorkList;
553 WorkList.reserve(SUnits.size());
555 WorkList.push_back(SU);
556 while (!WorkList.empty()) {
557 SU = WorkList.back();
559 Visited.set(SU->NodeNum);
560 for (int I = SU->Succs.size()-1; I >= 0; --I) {
561 int s = SU->Succs[I].Dep->NodeNum;
562 if (Node2Index[s] == UpperBound) {
566 // Visit successors not already visited that are in the affected region.
567 if (!Visited.test(s) && Node2Index[s] < UpperBound) {
568 WorkList.push_back(SU->Succs[I].Dep);
574 /// Shift - Renumber the nodes so that the topological ordering is preserved.
576 void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
582 for (i = LowerBound; i <= UpperBound; ++i) {
583 // w is node at topological index i.
584 int w = Index2Node[i];
585 if (Visited.test(w)) {
591 Allocate(w, i - shift);
595 for (unsigned j = 0; j < L.size(); ++j) {
596 Allocate(L[j], i - shift);
602 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will create a cycle.
604 bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
605 if (IsReachable(TargetSU, SU))
607 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
609 if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
614 /// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
615 /// BTCycle in order to schedule a specific node. Returns the last unscheduled
616 /// SUnit. Also returns if a successor is unscheduled in the process.
617 void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
618 unsigned &CurCycle) {
620 while (CurCycle > BtCycle) {
621 OldSU = Sequence.back();
623 if (SU->isSucc(OldSU))
624 // Don't try to remove SU from AvailableQueue.
625 SU->isAvailable = false;
626 UnscheduleNodeBottomUp(OldSU);
631 if (SU->isSucc(OldSU)) {
632 assert(false && "Something is wrong!");
639 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
640 /// successors to the newly created node.
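///
/// Rough illustration (hypothetical instructions): if the node defining a live
/// physical register is a load-folded operation such as "t1 = OP [mem], t2",
/// it may be unfolded into "t0 = LOAD [mem]; t1 = OP t0, t2"; otherwise the
/// node is cloned outright. Either way the scheduler gains a second producer
/// of the value, its already-scheduled consumers are moved onto the new node,
/// and the physical-register conflict can be broken.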
641 SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
642 if (SU->FlaggedNodes.size())
645 SDNode *N = SU->Node;
650 bool TryUnfold = false;
651 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
652 MVT VT = N->getValueType(i);
655 else if (VT == MVT::Other)
658 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
659 const SDOperand &Op = N->getOperand(i);
660 MVT VT = Op.Val->getValueType(Op.ResNo);
666 SmallVector<SDNode*, 2> NewNodes;
667 if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
670 DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
671 assert(NewNodes.size() == 2 && "Expected a load folding node!");
674 SDNode *LoadNode = NewNodes[0];
675 unsigned NumVals = N->getNumValues();
676 unsigned OldNumVals = SU->Node->getNumValues();
677 for (unsigned i = 0; i != NumVals; ++i)
678 DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i));
679 DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1),
680 SDOperand(LoadNode, 1));
682 SUnit *NewSU = CreateNewSUnit(N);
683 assert(N->getNodeId() == -1 && "Node already inserted!");
684 N->setNodeId(NewSU->NodeNum);
686 const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
687 for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
688 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
689 NewSU->isTwoAddress = true;
693 if (TID.isCommutable())
694 NewSU->isCommutable = true;
695 // FIXME: Calculate height / depth and propagate the changes?
696 NewSU->Depth = SU->Depth;
697 NewSU->Height = SU->Height;
698 ComputeLatency(NewSU);
700 // LoadNode may already exist. This can happen when there is another
701 // load from the same location that produces the same type of value
702 // but has different alignment or volatility.
703 bool isNewLoad = true;
705 if (LoadNode->getNodeId() != -1) {
706 LoadSU = &SUnits[LoadNode->getNodeId()];
709 LoadSU = CreateNewSUnit(LoadNode);
710 LoadNode->setNodeId(LoadSU->NodeNum);
712 LoadSU->Depth = SU->Depth;
713 LoadSU->Height = SU->Height;
714 ComputeLatency(LoadSU);
717 SUnit *ChainPred = NULL;
718 SmallVector<SDep, 4> ChainSuccs;
719 SmallVector<SDep, 4> LoadPreds;
720 SmallVector<SDep, 4> NodePreds;
721 SmallVector<SDep, 4> NodeSuccs;
722 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
726 else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
727 LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
729 NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
731 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
734 ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
735 I->isCtrl, I->isSpecial));
737 NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
738 I->isCtrl, I->isSpecial));
742 RemovePred(SU, ChainPred, true, false);
744 AddPred(LoadSU, ChainPred, true, false);
746 for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
747 SDep *Pred = &LoadPreds[i];
748 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
750 AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
751 Pred->Reg, Pred->Cost);
754 for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
755 SDep *Pred = &NodePreds[i];
756 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
757 AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
758 Pred->Reg, Pred->Cost);
760 for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
761 SDep *Succ = &NodeSuccs[i];
762 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
763 AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
764 Succ->Reg, Succ->Cost);
766 for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
767 SDep *Succ = &ChainSuccs[i];
768 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
770 AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
771 Succ->Reg, Succ->Cost);
775 AddPred(NewSU, LoadSU, false, false);
779 AvailableQueue->addNode(LoadSU);
780 AvailableQueue->addNode(NewSU);
784 if (NewSU->NumSuccsLeft == 0) {
785 NewSU->isAvailable = true;
791 DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
792 NewSU = CreateClone(SU);
794 // New SUnit has the exact same predecessors.
795 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
798 AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
799 NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
802 // Only copy scheduled successors. Cut them from old node's successor
803 // list and move them over.
804 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
805 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
809 if (I->Dep->isScheduled) {
810 NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
811 AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
812 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
815 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
816 SUnit *Succ = DelDeps[i].first;
817 bool isCtrl = DelDeps[i].second;
818 RemovePred(Succ, SU, isCtrl, false);
821 AvailableQueue->updateNode(SU);
822 AvailableQueue->addNode(NewSU);
828 /// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
829 /// and move all scheduled successors of the given SUnit to the last copy.
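///
/// Rough picture (illustrative): when the value cannot simply be duplicated,
/// it is copied out of the physical register into a register of DestRC
/// (CopyFromSU) and copied back again (CopyToSU); the already-scheduled
/// successors are rewired to depend on the copy-back node, so the physical
/// register is free in between.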
830 void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
831 const TargetRegisterClass *DestRC,
832 const TargetRegisterClass *SrcRC,
833 SmallVector<SUnit*, 2> &Copies) {
834 SUnit *CopyFromSU = CreateNewSUnit(NULL);
835 CopyFromSU->CopySrcRC = SrcRC;
836 CopyFromSU->CopyDstRC = DestRC;
837 CopyFromSU->Depth = SU->Depth;
838 CopyFromSU->Height = SU->Height;
840 SUnit *CopyToSU = CreateNewSUnit(NULL);
841 CopyToSU->CopySrcRC = DestRC;
842 CopyToSU->CopyDstRC = SrcRC;
844 // Only copy scheduled successors. Cut them from old node's successor
845 // list and move them over.
846 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
847 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
851 if (I->Dep->isScheduled) {
852 CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
853 AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
854 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
857 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
858 SUnit *Succ = DelDeps[i].first;
859 bool isCtrl = DelDeps[i].second;
860 RemovePred(Succ, SU, isCtrl, false);
863 AddPred(CopyFromSU, SU, false, false, Reg, -1);
864 AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);
866 AvailableQueue->updateNode(SU);
867 AvailableQueue->addNode(CopyFromSU);
868 AvailableQueue->addNode(CopyToSU);
869 Copies.push_back(CopyFromSU);
870 Copies.push_back(CopyToSU);
875 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
876 /// definition of the specified node.
877 /// FIXME: Move to SelectionDAG?
878 static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
879 const TargetInstrInfo *TII) {
880 const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
881 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
882 unsigned NumRes = TID.getNumDefs();
883 for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
888 return N->getValueType(NumRes);
891 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
892 /// scheduling of the given node to satisfy live physical register dependencies.
893 /// If the specific node is the last one that's available to schedule, do
894 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
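///
/// Illustrative case (hypothetical target): if an already-scheduled node still
/// has a live implicit def of, say, a condition-code register, a candidate
/// that would also clobber that register cannot be scheduled yet; the register
/// is reported in LRegs so the caller can backtrack, duplicate the defining
/// node, or fall back to cross-class copies.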
895 bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
896 SmallVector<unsigned, 4> &LRegs){
897 if (LiveRegs.empty())
900 SmallSet<unsigned, 4> RegAdded;
901 // If this node would clobber any "live" register, then it's not ready.
902 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
905 unsigned Reg = I->Reg;
906 if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
907 if (RegAdded.insert(Reg))
908 LRegs.push_back(Reg);
910 for (const unsigned *Alias = TRI->getAliasSet(Reg);
912 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
913 if (RegAdded.insert(*Alias))
914 LRegs.push_back(*Alias);
919 for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
920 SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
921 if (!Node || !Node->isTargetOpcode())
923 const TargetInstrDesc &TID = TII->get(Node->getTargetOpcode());
924 if (!TID.ImplicitDefs)
926 for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
927 if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
928 if (RegAdded.insert(*Reg))
929 LRegs.push_back(*Reg);
931 for (const unsigned *Alias = TRI->getAliasSet(*Reg);
933 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
934 if (RegAdded.insert(*Alias))
935 LRegs.push_back(*Alias);
939 return !LRegs.empty();
943 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up schedulers.
945 void ScheduleDAGRRList::ListScheduleBottomUp() {
946 unsigned CurCycle = 0;
947 // Add root to Available queue.
948 if (!SUnits.empty()) {
949 SUnit *RootSU = &SUnits[DAG.getRoot().Val->getNodeId()];
950 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
951 RootSU->isAvailable = true;
952 AvailableQueue->push(RootSU);
955 // While Available queue is not empty, grab the node with the highest
956 // priority. If it is not ready put it back. Schedule the node.
957 SmallVector<SUnit*, 4> NotReady;
958 DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
959 Sequence.reserve(SUnits.size());
960 while (!AvailableQueue->empty()) {
961 bool Delayed = false;
963 SUnit *CurSU = AvailableQueue->pop();
965 if (CurSU->CycleBound <= CurCycle) {
966 SmallVector<unsigned, 4> LRegs;
967 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
970 LRegsMap.insert(std::make_pair(CurSU, LRegs));
973 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
974 NotReady.push_back(CurSU);
975 CurSU = AvailableQueue->pop();
978 // All candidates are delayed due to live physical reg dependencies.
979 // Try backtracking, code duplication, or inserting cross class copies to resolve it.
981 if (Delayed && !CurSU) {
982 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
983 SUnit *TrySU = NotReady[i];
984 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
986 // Try unscheduling up to the point where it's safe to schedule this node.
988 unsigned LiveCycle = CurCycle;
989 for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
990 unsigned Reg = LRegs[j];
991 unsigned LCycle = LiveRegCycles[Reg];
992 LiveCycle = std::min(LiveCycle, LCycle);
994 SUnit *OldSU = Sequence[LiveCycle];
995 if (!WillCreateCycle(TrySU, OldSU)) {
996 BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
997 // Force the current node to be scheduled before the node that
998 // requires the physical reg dep.
999 if (OldSU->isAvailable) {
1000 OldSU->isAvailable = false;
1001 AvailableQueue->remove(OldSU);
1003 AddPred(TrySU, OldSU, true, true);
1004 // If one or more successors have been unscheduled, then the current
1005 // node is no longer available. Schedule a successor that's now
1006 // available instead.
1007 if (!TrySU->isAvailable)
1008 CurSU = AvailableQueue->pop();
1011 TrySU->isPending = false;
1012 NotReady.erase(NotReady.begin()+i);
1019 // Can't backtrack. Try duplicating the nodes that produce these
1020 // "expensive to copy" values to break the dependency. In case even
1021 // that doesn't work, insert cross class copies.
1022 SUnit *TrySU = NotReady[0];
1023 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1024 assert(LRegs.size() == 1 && "Can't handle this yet!");
1025 unsigned Reg = LRegs[0];
1026 SUnit *LRDef = LiveRegDefs[Reg];
1027 SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
1029 // Issue expensive cross register class copies.
1030 MVT VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
1031 const TargetRegisterClass *RC =
1032 TRI->getPhysicalRegisterRegClass(Reg, VT);
1033 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1035 assert(false && "Don't know how to copy this physical register!");
1038 SmallVector<SUnit*, 2> Copies;
1039 InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1040 DOUT << "Adding an edge from SU # " << TrySU->NodeNum
1041 << " to SU #" << Copies.front()->NodeNum << "\n";
1042 AddPred(TrySU, Copies.front(), true, true);
1043 NewDef = Copies.back();
1046 DOUT << "Adding an edge from SU # " << NewDef->NodeNum
1047 << " to SU #" << TrySU->NodeNum << "\n";
1048 LiveRegDefs[Reg] = NewDef;
1049 AddPred(NewDef, TrySU, true, true);
1050 TrySU->isAvailable = false;
1055 assert(false && "Unable to resolve live physical register dependencies!");
1060 // Add the nodes that aren't ready back onto the available list.
1061 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
1062 NotReady[i]->isPending = false;
1063 // May no longer be available due to backtracking.
1064 if (NotReady[i]->isAvailable)
1065 AvailableQueue->push(NotReady[i]);
1070 Sequence.push_back(0);
1072 ScheduleNodeBottomUp(CurSU, CurCycle);
1073 Sequence.push_back(CurSU);
1078 // Reverse the order if it is bottom up.
1079 std::reverse(Sequence.begin(), Sequence.end());
1083 // Verify that all SUnits were scheduled.
1084 bool AnyNotSched = false;
1085 unsigned DeadNodes = 0;
1087 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1088 if (!SUnits[i].isScheduled) {
1089 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1094 cerr << "*** List scheduling failed! ***\n";
1095 SUnits[i].dump(&DAG);
1096 cerr << "has not been scheduled!\n";
1099 if (SUnits[i].NumSuccsLeft != 0) {
1101 cerr << "*** List scheduling failed! ***\n";
1102 SUnits[i].dump(&DAG);
1103 cerr << "has successors left!\n";
1107 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1110 assert(!AnyNotSched);
1111 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1112 "The number of nodes scheduled doesn't match the expected number!");
1116 //===----------------------------------------------------------------------===//
1117 // Top-Down Scheduling
1118 //===----------------------------------------------------------------------===//
1120 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
1121 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
1122 void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
1123 unsigned CurCycle) {
1124 // FIXME: the distance between two nodes is not always == the predecessor's
1125 // latency. For example, the reader can very well read the register written
1126 // by the predecessor later than the issue cycle. It also depends on the
1127 // interrupt model (drain vs. freeze).
1128 SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
1130 --SuccSU->NumPredsLeft;
1133 if (SuccSU->NumPredsLeft < 0) {
1134 cerr << "*** List scheduling failed! ***\n";
1136 cerr << " has been released too many times!\n";
1141 if (SuccSU->NumPredsLeft == 0) {
1142 SuccSU->isAvailable = true;
1143 AvailableQueue->push(SuccSU);
1148 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1149 /// count of its successors. If a successor pending count is zero, add it to
1150 /// the Available queue.
1151 void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
1152 DOUT << "*** Scheduling [" << CurCycle << "]: ";
1153 DEBUG(SU->dump(&DAG));
1154 SU->Cycle = CurCycle;
1156 AvailableQueue->ScheduledNode(SU);
1158 // Top down: release successors
1159 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1161 ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
1162 SU->isScheduled = true;
1165 /// ListScheduleTopDown - The main loop of list scheduling for top-down schedulers.
1167 void ScheduleDAGRRList::ListScheduleTopDown() {
1168 unsigned CurCycle = 0;
1170 // All leaves to Available queue.
1171 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1172 // It is available if it has no predecessors.
1173 if (SUnits[i].Preds.empty()) {
1174 AvailableQueue->push(&SUnits[i]);
1175 SUnits[i].isAvailable = true;
1179 // While Available queue is not empty, grab the node with the highest
1180 // priority. If it is not ready put it back. Schedule the node.
1181 std::vector<SUnit*> NotReady;
1182 Sequence.reserve(SUnits.size());
1183 while (!AvailableQueue->empty()) {
1184 SUnit *CurSU = AvailableQueue->pop();
1185 while (CurSU && CurSU->CycleBound > CurCycle) {
1186 NotReady.push_back(CurSU);
1187 CurSU = AvailableQueue->pop();
1190 // Add the nodes that aren't ready back onto the available list.
1191 AvailableQueue->push_all(NotReady);
1195 Sequence.push_back(0);
1197 ScheduleNodeTopDown(CurSU, CurCycle);
1198 Sequence.push_back(CurSU);
1205 // Verify that all SUnits were scheduled.
1206 bool AnyNotSched = false;
1207 unsigned DeadNodes = 0;
1209 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1210 if (!SUnits[i].isScheduled) {
1211 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1216 cerr << "*** List scheduling failed! ***\n";
1217 SUnits[i].dump(&DAG);
1218 cerr << "has not been scheduled!\n";
1221 if (SUnits[i].NumPredsLeft != 0) {
1223 cerr << "*** List scheduling failed! ***\n";
1224 SUnits[i].dump(&DAG);
1225 cerr << "has predecessors left!\n";
1229 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1232 assert(!AnyNotSched);
1233 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1234 "The number of nodes scheduled doesn't match the expected number!");
1240 //===----------------------------------------------------------------------===//
1241 // RegReductionPriorityQueue Implementation
1242 //===----------------------------------------------------------------------===//
1244 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1245 // to reduce register pressure.
1249 class RegReductionPriorityQueue;
1251 /// Sorting functions for the Available queue.
1252 struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1253 RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
1254 bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
1255 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1257 bool operator()(const SUnit* left, const SUnit* right) const;
1260 struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1261 RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
1262 td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
1263 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1265 bool operator()(const SUnit* left, const SUnit* right) const;
1267 } // end anonymous namespace
1269 static inline bool isCopyFromLiveIn(const SUnit *SU) {
1270 SDNode *N = SU->Node;
1271 return N && N->getOpcode() == ISD::CopyFromReg &&
1272 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
1277 class VISIBILITY_HIDDEN RegReductionPriorityQueue
1278 : public SchedulingPriorityQueue {
1279 PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
1280 unsigned currentQueueId;
1283 RegReductionPriorityQueue() :
1284 Queue(SF(this)), currentQueueId(0) {}
1286 virtual void initNodes(std::vector<SUnit> &sunits) {}
1288 virtual void addNode(const SUnit *SU) {}
1290 virtual void updateNode(const SUnit *SU) {}
1292 virtual void releaseState() {}
1294 virtual unsigned getNodePriority(const SUnit *SU) const {
1298 unsigned size() const { return Queue.size(); }
1300 bool empty() const { return Queue.empty(); }
1302 void push(SUnit *U) {
1303 assert(!U->NodeQueueId && "Node in the queue already");
1304 U->NodeQueueId = ++currentQueueId;
1308 void push_all(const std::vector<SUnit *> &Nodes) {
1309 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
1314 if (empty()) return NULL;
1315 SUnit *V = Queue.top();
1321 void remove(SUnit *SU) {
1322 assert(!Queue.empty() && "Queue is empty!");
1323 assert(SU->NodeQueueId != 0 && "Not in queue!");
1324 Queue.erase_one(SU);
1325 SU->NodeQueueId = 0;
1329 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
1330 : public RegReductionPriorityQueue<bu_ls_rr_sort> {
1331 // SUnits - The SUnits for the current graph.
1332 const std::vector<SUnit> *SUnits;
1334 // SethiUllmanNumbers - The SethiUllman number for each node.
1335 std::vector<unsigned> SethiUllmanNumbers;
1337 const TargetInstrInfo *TII;
1338 const TargetRegisterInfo *TRI;
1339 ScheduleDAGRRList *scheduleDAG;
1343 explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
1344 const TargetRegisterInfo *tri,
1346 : TII(tii), TRI(tri), scheduleDAG(NULL), Fast(f) {}
1348 void initNodes(std::vector<SUnit> &sunits) {
1350 // Add pseudo dependency edges for two-address nodes.
1352 AddPseudoTwoAddrDeps();
1353 // Calculate node priorities.
1354 CalculateSethiUllmanNumbers();
1357 void addNode(const SUnit *SU) {
1358 SethiUllmanNumbers.resize(SUnits->size(), 0);
1359 CalcNodeSethiUllmanNumber(SU);
1362 void updateNode(const SUnit *SU) {
1363 SethiUllmanNumbers[SU->NodeNum] = 0;
1364 CalcNodeSethiUllmanNumber(SU);
1367 void releaseState() {
1369 SethiUllmanNumbers.clear();
1372 unsigned getNodePriority(const SUnit *SU) const {
1373 assert(SU->NodeNum < SethiUllmanNumbers.size());
1374 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1375 if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
1376 // CopyFromReg should be close to its def because it restricts
1377 // allocation choices. But if it is a livein then perhaps we want it
1378 // closer to its uses so it can be coalesced.
1380 else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1381 // CopyToReg should be close to its uses to facilitate coalescing and
1384 else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
1385 Opc == TargetInstrInfo::INSERT_SUBREG)
1386 // EXTRACT_SUBREG / INSERT_SUBREG should be close to their uses to
1387 // facilitate coalescing.
1389 else if (SU->NumSuccs == 0)
1390 // If SU does not have a use, i.e. it doesn't produce a value that would
1391 // be consumed (e.g. store), then it terminates a chain of computation.
1392 // Give it a large SethiUllman number so it will be scheduled right
1393 // before its predecessors, so that it doesn't lengthen their live ranges.
1395 else if (SU->NumPreds == 0)
1396 // If SU does not have a def, schedule it close to its uses because it
1397 // does not lengthen any live ranges.
1400 return SethiUllmanNumbers[SU->NodeNum];
1403 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1404 scheduleDAG = scheduleDag;
1408 bool canClobber(const SUnit *SU, const SUnit *Op);
1409 void AddPseudoTwoAddrDeps();
1410 void CalculateSethiUllmanNumbers();
1411 unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
1415 class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
1416 : public RegReductionPriorityQueue<td_ls_rr_sort> {
1417 // SUnits - The SUnits for the current graph.
1418 const std::vector<SUnit> *SUnits;
1420 // SethiUllmanNumbers - The SethiUllman number for each node.
1421 std::vector<unsigned> SethiUllmanNumbers;
1424 TDRegReductionPriorityQueue() {}
1426 void initNodes(std::vector<SUnit> &sunits) {
1428 // Calculate node priorities.
1429 CalculateSethiUllmanNumbers();
1432 void addNode(const SUnit *SU) {
1433 SethiUllmanNumbers.resize(SUnits->size(), 0);
1434 CalcNodeSethiUllmanNumber(SU);
1437 void updateNode(const SUnit *SU) {
1438 SethiUllmanNumbers[SU->NodeNum] = 0;
1439 CalcNodeSethiUllmanNumber(SU);
1442 void releaseState() {
1444 SethiUllmanNumbers.clear();
1447 unsigned getNodePriority(const SUnit *SU) const {
1448 assert(SU->NodeNum < SethiUllmanNumbers.size());
1449 return SethiUllmanNumbers[SU->NodeNum];
1453 void CalculateSethiUllmanNumbers();
1454 unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
1458 /// closestSucc - Returns the scheduled cycle of the successor which is
1459 /// closest to the current cycle.
1460 static unsigned closestSucc(const SUnit *SU) {
1461 unsigned MaxCycle = 0;
1462 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1464 unsigned Cycle = I->Dep->Cycle;
1465 // If there are a bunch of CopyToRegs stacked up, they should be considered
1466 // to be at the same position.
1467 if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
1468 Cycle = closestSucc(I->Dep)+1;
1469 if (Cycle > MaxCycle)
1475 /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
1476 /// for scratch registers. Live-in operands and live-out results don't count
1477 /// since they are "fixed".
1478 static unsigned calcMaxScratches(const SUnit *SU) {
1479 unsigned Scratches = 0;
1480 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1482 if (I->isCtrl) continue; // ignore chain preds
1483 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
1486 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1488 if (I->isCtrl) continue; // ignore chain succs
1489 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
1496 bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1498 unsigned LPriority = SPQ->getNodePriority(left);
1499 unsigned RPriority = SPQ->getNodePriority(right);
1500 if (LPriority != RPriority)
1501 return LPriority > RPriority;
1503 // Try to schedule def + use closer when Sethi-Ullman numbers are the same:
1508 // when an unscheduled def (e.g. "t2 = op ...") feeds an already-scheduled
1512 // use and several such defs are ready, schedule first the def whose use is
1519 // closest to the current cycle.  This creates more short live intervals.
1520 unsigned LDist = closestSucc(left);
1521 unsigned RDist = closestSucc(right);
1523 return LDist < RDist;
1525 // Intuitively, it's good to push down instructions whose results are
1526 // liveout so their long live ranges won't conflict with other values
1527 // which are needed inside the BB. Further prioritize liveout instructions
1528 // by the number of operands which are calculated within the BB.
1529 unsigned LScratch = calcMaxScratches(left);
1530 unsigned RScratch = calcMaxScratches(right);
1531 if (LScratch != RScratch)
1532 return LScratch > RScratch;
1534 if (left->Height != right->Height)
1535 return left->Height > right->Height;
1537 if (left->Depth != right->Depth)
1538 return left->Depth < right->Depth;
1540 if (left->CycleBound != right->CycleBound)
1541 return left->CycleBound > right->CycleBound;
1543 assert(left->NodeQueueId && right->NodeQueueId &&
1544 "NodeQueueId cannot be zero");
1545 return (left->NodeQueueId > right->NodeQueueId);
1549 BURegReductionPriorityQueue::canClobber(const SUnit *SU, const SUnit *Op) {
1550 if (SU->isTwoAddress) {
1551 unsigned Opc = SU->Node->getTargetOpcode();
1552 const TargetInstrDesc &TID = TII->get(Opc);
1553 unsigned NumRes = TID.getNumDefs();
1554 unsigned NumOps = TID.getNumOperands() - NumRes;
1555 for (unsigned i = 0; i != NumOps; ++i) {
1556 if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
1557 SDNode *DU = SU->Node->getOperand(i).Val;
1558 if (DU->getNodeId() != -1 &&
1559 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
1568 /// hasCopyToRegUse - Return true if SU has a value successor that is a CopyToReg node.
1570 static bool hasCopyToRegUse(SUnit *SU) {
1571 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1573 if (I->isCtrl) continue;
1574 SUnit *SuccSU = I->Dep;
1575 if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg)
1581 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
1582 /// physical register defs.
1583 static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
1584 const TargetInstrInfo *TII,
1585 const TargetRegisterInfo *TRI) {
1586 SDNode *N = SuccSU->Node;
1587 unsigned NumDefs = TII->get(N->getTargetOpcode()).getNumDefs();
1588 const unsigned *ImpDefs = TII->get(N->getTargetOpcode()).getImplicitDefs();
1589 assert(ImpDefs && "Caller should check hasPhysRegDefs");
1590 const unsigned *SUImpDefs =
1591 TII->get(SU->Node->getTargetOpcode()).getImplicitDefs();
1594 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
1595 MVT VT = N->getValueType(i);
1596 if (VT == MVT::Flag || VT == MVT::Other)
1598 unsigned Reg = ImpDefs[i - NumDefs];
1599 for (;*SUImpDefs; ++SUImpDefs) {
1600 unsigned SUReg = *SUImpDefs;
1601 if (TRI->regsOverlap(Reg, SUReg))
1608 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
1609 /// it as a def&use operand, add a pseudo control edge from it to the other
1610 /// node (if it won't create a cycle) so the two-address one will be scheduled
1611 /// first (lower in the schedule). If both nodes are two-address, favor the
1612 /// one that has a CopyToReg use (more likely to be a loop induction update).
1613 /// If both are two-address, but one is commutable while the other is not
1614 /// commutable, favor the one that's not commutable.
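///
/// Hypothetical example: if SU is the two-address node "t2 = op t1, x" (t2
/// tied to t1, so t1's register gets clobbered) and SuccSU is "t3 = op2 t1, y",
/// another reader of t1, then the added pseudo dependency forces SuccSU to be
/// placed before SU in the final order, so it reads t1 before the clobber.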
1615 void BURegReductionPriorityQueue::AddPseudoTwoAddrDeps() {
1616 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1617 SUnit *SU = (SUnit *)&((*SUnits)[i]);
1618 if (!SU->isTwoAddress)
1621 SDNode *Node = SU->Node;
1622 if (!Node || !Node->isTargetOpcode() || SU->FlaggedNodes.size() > 0)
1625 unsigned Opc = Node->getTargetOpcode();
1626 const TargetInstrDesc &TID = TII->get(Opc);
1627 unsigned NumRes = TID.getNumDefs();
1628 unsigned NumOps = TID.getNumOperands() - NumRes;
1629 for (unsigned j = 0; j != NumOps; ++j) {
1630 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
1631 SDNode *DU = SU->Node->getOperand(j).Val;
1632 if (DU->getNodeId() == -1)
1634 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
1635 if (!DUSU) continue;
1636 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
1637 E = DUSU->Succs.end(); I != E; ++I) {
1638 if (I->isCtrl) continue;
1639 SUnit *SuccSU = I->Dep;
1642 // Be conservative. Ignore if nodes aren't at roughly the same
1643 // depth and height.
1644 if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
1646 if (!SuccSU->Node || !SuccSU->Node->isTargetOpcode())
1648 // Don't constrain nodes with physical register defs if the
1649 // predecessor can clobber them.
1650 if (SuccSU->hasPhysRegDefs) {
1651 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
1654 // Don't constrain extract_subreg / insert_subreg; these may be
1655 // coalesced away. We don't want them close to their uses.
1656 unsigned SuccOpc = SuccSU->Node->getTargetOpcode();
1657 if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
1658 SuccOpc == TargetInstrInfo::INSERT_SUBREG)
1660 if ((!canClobber(SuccSU, DUSU) ||
1661 (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
1662 (!SU->isCommutable && SuccSU->isCommutable)) &&
1663 !scheduleDAG->IsReachable(SuccSU, SU)) {
1664 DOUT << "Adding an edge from SU # " << SU->NodeNum
1665 << " to SU #" << SuccSU->NodeNum << "\n";
1666 scheduleDAG->AddPred(SU, SuccSU, true, true);
1674 /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
1675 /// Smaller number is the higher priority.
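///
/// Illustrative example (classic Sethi-Ullman on an expression tree): for
/// (a+b)+(c+d), the leaves a..d each get number 1, each inner '+' gets
/// max(1,1)+1 = 2 because its operands tie, and the root gets max(2,2)+1 = 3,
/// reflecting that the subtree needs roughly three registers; evaluating the
/// operand with the larger number first keeps register pressure down.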
1676 unsigned BURegReductionPriorityQueue::
1677 CalcNodeSethiUllmanNumber(const SUnit *SU) {
1678 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
1679 if (SethiUllmanNumber != 0)
1680 return SethiUllmanNumber;
1683 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1685 if (I->isCtrl) continue; // ignore chain preds
1686 SUnit *PredSU = I->Dep;
1687 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
1688 if (PredSethiUllman > SethiUllmanNumber) {
1689 SethiUllmanNumber = PredSethiUllman;
1691 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1695 SethiUllmanNumber += Extra;
1697 if (SethiUllmanNumber == 0)
1698 SethiUllmanNumber = 1;
1700 return SethiUllmanNumber;
1703 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1704 /// scheduling units.
1705 void BURegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
1706 SethiUllmanNumbers.assign(SUnits->size(), 0);
1708 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1709 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
1712 /// LimitedSumOfUnscheduledPredsOfSuccs - Compute the number of unscheduled
1713 /// predecessors of the successors of the SUnit SU. Stop when the provided
1714 /// limit is exceeded.
1715 static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
1718 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1720 SUnit *SuccSU = I->Dep;
1721 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1722 EE = SuccSU->Preds.end(); II != EE; ++II) {
1723 SUnit *PredSU = II->Dep;
1724 if (!PredSU->isScheduled)
1734 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1735 unsigned LPriority = SPQ->getNodePriority(left);
1736 unsigned RPriority = SPQ->getNodePriority(right);
1737 bool LIsTarget = left->Node && left->Node->isTargetOpcode();
1738 bool RIsTarget = right->Node && right->Node->isTargetOpcode();
1739 bool LIsFloater = LIsTarget && left->NumPreds == 0;
1740 bool RIsFloater = RIsTarget && right->NumPreds == 0;
1741 unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
1742 unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
1744 if (left->NumSuccs == 0 && right->NumSuccs != 0)
1746 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
1753 if (left->NumSuccs == 1)
1755 if (right->NumSuccs == 1)
1758 if (LPriority+LBonus != RPriority+RBonus)
1759 return LPriority+LBonus < RPriority+RBonus;
1761 if (left->Depth != right->Depth)
1762 return left->Depth < right->Depth;
1764 if (left->NumSuccsLeft != right->NumSuccsLeft)
1765 return left->NumSuccsLeft > right->NumSuccsLeft;
1767 if (left->CycleBound != right->CycleBound)
1768 return left->CycleBound > right->CycleBound;
1770 assert(left->NodeQueueId && right->NodeQueueId &&
1771 "NodeQueueId cannot be zero");
1772 return (left->NodeQueueId > right->NodeQueueId);
1775 /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
1776 /// Smaller number is the higher priority.
1777 unsigned TDRegReductionPriorityQueue::
1778 CalcNodeSethiUllmanNumber(const SUnit *SU) {
1779 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
1780 if (SethiUllmanNumber != 0)
1781 return SethiUllmanNumber;
1783 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1784 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1785 SethiUllmanNumber = 0xffff;
1786 else if (SU->NumSuccsLeft == 0)
1787 // If SU does not have a use, i.e. it doesn't produce a value that would
1788 // be consumed (e.g. store), then it terminates a chain of computation.
1789 // Give it a small SethiUllman number so it will be scheduled right before
1790 // its predecessors, so that it doesn't lengthen their live ranges.
1791 SethiUllmanNumber = 0;
1792 else if (SU->NumPredsLeft == 0 &&
1793 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
1794 SethiUllmanNumber = 0xffff;
1797 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1799 if (I->isCtrl) continue; // ignore chain preds
1800 SUnit *PredSU = I->Dep;
1801 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
1802 if (PredSethiUllman > SethiUllmanNumber) {
1803 SethiUllmanNumber = PredSethiUllman;
1805 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1809 SethiUllmanNumber += Extra;
1812 return SethiUllmanNumber;
1815 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1816 /// scheduling units.
1817 void TDRegReductionPriorityQueue::CalculateSethiUllmanNumbers() {
1818 SethiUllmanNumbers.assign(SUnits->size(), 0);
1820 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1821 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
1824 //===----------------------------------------------------------------------===//
1825 // Public Constructor Functions
1826 //===----------------------------------------------------------------------===//
1828 llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1830 MachineBasicBlock *BB,
1832 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
1833 const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo();
1835 BURegReductionPriorityQueue *priorityQueue =
1836 new BURegReductionPriorityQueue(TII, TRI, Fast);
1838 ScheduleDAGRRList * scheduleDAG =
1839 new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, Fast,priorityQueue);
1840 priorityQueue->setScheduleDAG(scheduleDAG);
1844 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1846 MachineBasicBlock *BB,
1848 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false, Fast,
1849 new TDRegReductionPriorityQueue());