1 //===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "pre-RA-sched"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/SchedulerRegistry.h"
21 #include "llvm/Target/TargetRegisterInfo.h"
22 #include "llvm/Target/TargetData.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Target/TargetInstrInfo.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/Support/Compiler.h"
27 #include "llvm/ADT/SmallPtrSet.h"
28 #include "llvm/ADT/SmallSet.h"
29 #include "llvm/ADT/Statistic.h"
30 #include "llvm/ADT/STLExtras.h"
33 #include "llvm/Support/CommandLine.h"
36 STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
37 STATISTIC(NumUnfolds, "Number of nodes unfolded");
38 STATISTIC(NumDups, "Number of duplicated nodes");
39 STATISTIC(NumCCCopies, "Number of cross class copies");
41 static RegisterScheduler
42 burrListDAGScheduler("list-burr",
43 " Bottom-up register reduction list scheduling",
44 createBURRListDAGScheduler);
45 static RegisterScheduler
46 tdrListrDAGScheduler("list-tdrr",
47 " Top-down register reduction list scheduling",
48 createTDRRListDAGScheduler);
51 //===----------------------------------------------------------------------===//
52 /// ScheduleDAGRRList - The actual register reduction list scheduler
53 /// implementation. This supports both top-down and bottom-up scheduling.
55 class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
57 /// isBottomUp - This is true if the scheduling problem is bottom-up, false if it is top-down.
61 /// AvailableQueue - The priority queue to use for the available SUnits.
62 SchedulingPriorityQueue *AvailableQueue;
64 /// LiveRegs / LiveRegDefs - A set of physical registers and their definitions
65 /// that are "live". These nodes must be scheduled before any other nodes that
66 /// modify the registers can be scheduled.
67 SmallSet<unsigned, 4> LiveRegs;
68 std::vector<SUnit*> LiveRegDefs;
69 std::vector<unsigned> LiveRegCycles;
72 ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
73 const TargetMachine &tm, bool isbottomup,
74 SchedulingPriorityQueue *availqueue)
75 : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
76 AvailableQueue(availqueue) {
79 ~ScheduleDAGRRList() {
80 delete AvailableQueue;
85 /// IsReachable - Checks if SU is reachable from TargetSU.
86 bool IsReachable(SUnit *SU, SUnit *TargetSU);
88 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will create a cycle.
90 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU);
92 /// AddPred - This adds the specified node X as a predecessor of
93 /// the current node Y if it is not one already.
94 /// This returns true if this is a new predecessor.
95 /// Updates the topological ordering if required.
96 bool AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
97 unsigned PhyReg = 0, int Cost = 1);
99 /// RemovePred - This removes the specified node N from the predecessors of
100 /// the current node M. Updates the topological ordering if required.
101 bool RemovePred(SUnit *M, SUnit *N, bool isCtrl, bool isSpecial);
104 void ReleasePred(SUnit*, bool, unsigned);
105 void ReleaseSucc(SUnit*, bool isChain, unsigned);
106 void CapturePred(SUnit*, SUnit*, bool);
107 void ScheduleNodeBottomUp(SUnit*, unsigned);
108 void ScheduleNodeTopDown(SUnit*, unsigned);
109 void UnscheduleNodeBottomUp(SUnit*);
110 void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
111 SUnit *CopyAndMoveSuccessors(SUnit*);
112 void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
113 const TargetRegisterClass*,
114 const TargetRegisterClass*,
115 SmallVector<SUnit*, 2>&);
116 bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
117 void ListScheduleTopDown();
118 void ListScheduleBottomUp();
119 void CommuteNodesToReducePressure();
122 /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
123 /// Updates the topological ordering if required.
124 SUnit *CreateNewSUnit(SDNode *N) {
125 SUnit *NewNode = NewSUnit(N);
126 // Update the topological ordering.
127 if (NewNode->NodeNum >= Node2Index.size())
128 InitDAGTopologicalSorting();
132 /// CreateClone - Creates a new SUnit from an existing one.
133 /// Updates the topological ordering if required.
134 SUnit *CreateClone(SUnit *N) {
135 SUnit *NewNode = Clone(N);
136 // Update the topological ordering.
137 if (NewNode->NodeNum >= Node2Index.size())
138 InitDAGTopologicalSorting();
142 /// Functions for preserving the topological ordering
143 /// even after dynamic insertions of new edges.
144 /// This allows a very fast implementation of IsReachable.
148 The idea of the algorithm is taken from
149 "Online algorithms for managing the topological order of
150 a directed acyclic graph" by David J. Pearce and Paul H.J. Kelly.
151 This is the MNR algorithm, which was first introduced by
152 A. Marchetti-Spaccamela, U. Nanni and H. Rohnert in
153 "Maintaining a topological order under edge insertions".
155 Short description of the algorithm:
157 Topological ordering, ord, of a DAG maps each node to a topological
158 index so that for all edges X->Y it is the case that ord(X) < ord(Y).
160 This means that if there is a path from the node X to the node Z,
161 then ord(X) < ord(Z).
163 This property can be used to check for reachability of nodes:
164 if Z is reachable from X, then an insertion of the edge Z->X would create a cycle.
167 The algorithm first computes a topological ordering for the DAG by initializing
168 the Index2Node and Node2Index arrays and then tries to keep the ordering
169 up-to-date after edge insertions by reordering the DAG.
171 On insertion of the edge X->Y, the algorithm first marks by calling DFS the
172 nodes reachable from Y, and then shifts them using Shift to lie immediately
173 after X in Index2Node.
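An illustrative example (not from the cited papers): suppose the DAG has
nodes A, B, C, D with edges A->B and C->D, and the current ordering is
ord(A)=0, ord(B)=1, ord(C)=2, ord(D)=3. Inserting the edge D->B violates
ord(D) < ord(B), so the order must be repaired: the DFS starting at B marks
only B (it has no successors inside the affected index range), and Shift then
moves B to lie immediately after D, giving ord(A)=0, ord(C)=1, ord(D)=2,
ord(B)=3, which again satisfies ord(X) < ord(Y) for every edge X->Y.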
176 /// InitDAGTopologicalSorting - create the initial topological
177 /// ordering from the DAG to be scheduled.
178 void InitDAGTopologicalSorting();
180 /// DFS - make a DFS traversal and mark all nodes affected by the
181 /// edge insertion. These nodes will later get new topological indexes
182 /// by means of the Shift method.
183 void DFS(SUnit *SU, int UpperBound, bool& HasLoop);
185 /// Shift - reassign topological indexes for the nodes in the DAG
186 /// to preserve the topological ordering.
187 void Shift(BitVector& Visited, int LowerBound, int UpperBound);
189 /// Allocate - assign the topological index to the node n.
190 void Allocate(int n, int index);
192 /// Index2Node - Maps topological index to the node number.
193 std::vector<int> Index2Node;
194 /// Node2Index - Maps the node number to its topological index.
195 std::vector<int> Node2Index;
196 /// Visited - a set of nodes visited during a DFS traversal.
199 } // end anonymous namespace
202 /// Schedule - Schedule the DAG using list scheduling.
203 void ScheduleDAGRRList::Schedule() {
204 DOUT << "********** List Scheduling **********\n";
206 LiveRegDefs.resize(TRI->getNumRegs(), NULL);
207 LiveRegCycles.resize(TRI->getNumRegs(), 0);
209 // Build scheduling units.
212 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
213 SUnits[su].dumpAll(&DAG));
216 InitDAGTopologicalSorting();
218 AvailableQueue->initNodes(SUnitMap, SUnits);
220 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
222 ListScheduleBottomUp();
224 ListScheduleTopDown();
226 AvailableQueue->releaseState();
228 CommuteNodesToReducePressure();
230 DOUT << "*** Final schedule ***\n";
231 DEBUG(dumpSchedule());
234 // Emit in scheduled order
238 /// CommuteNodesToReducePressure - If a node is two-address and commutable, and
239 /// it is not the last use of its first operand, add it to the CommuteSet if
240 /// possible. It will be commuted when it is translated to a MI.
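///
/// A minimal sketch of the idea (hypothetical operands, not from the original
/// comment): given a two-address, commutable node
///   t3 = add t1, t2
/// where t1 is read again by a later node but t2 is not, tying t3 to t1 would
/// force an extra copy or lengthen t1's live range. Recording the node in
/// CommuteSet lets the emitter produce
///   t3 = add t2, t1
/// so the tied first operand (now t2) is a last use.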
241 void ScheduleDAGRRList::CommuteNodesToReducePressure() {
242 SmallPtrSet<SUnit*, 4> OperandSeen;
243 for (unsigned i = Sequence.size(); i != 0; ) {
245 SUnit *SU = Sequence[i];
246 if (!SU || !SU->Node) continue;
247 if (SU->isCommutable) {
248 unsigned Opc = SU->Node->getTargetOpcode();
249 const TargetInstrDesc &TID = TII->get(Opc);
250 unsigned NumRes = TID.getNumDefs();
251 unsigned NumOps = TID.getNumOperands() - NumRes;
252 for (unsigned j = 0; j != NumOps; ++j) {
253 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
256 SDNode *OpN = SU->Node->getOperand(j).Val;
257 SUnit *OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo];
258 if (OpSU && OperandSeen.count(OpSU) == 1) {
259 // Ok, so SU is not the last use of OpSU, but SU is two-address so
260 // it will clobber OpSU. Try to commute SU if no other source operands are live below.
262 bool DoCommute = true;
263 for (unsigned k = 0; k < NumOps; ++k) {
265 OpN = SU->Node->getOperand(k).Val;
266 OpSU = isPassiveNode(OpN) ? NULL : SUnitMap[OpN][SU->InstanceNo];
267 if (OpSU && OperandSeen.count(OpSU) == 1) {
274 CommuteSet.insert(SU->Node);
277 // Only look at the first use&def node for now.
282 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
285 OperandSeen.insert(I->Dep);
290 //===----------------------------------------------------------------------===//
291 // Bottom-Up Scheduling
292 //===----------------------------------------------------------------------===//
294 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
295 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
296 void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
298 // FIXME: the distance between two nodes is not always == the predecessor's
299 // latency. For example, the reader can very well read the register written
300 // by the predecessor later than the issue cycle. It also depends on the
301 // interrupt model (drain vs. freeze).
302 PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);
304 --PredSU->NumSuccsLeft;
307 if (PredSU->NumSuccsLeft < 0) {
308 cerr << "*** List scheduling failed! ***\n";
310 cerr << " has been released too many times!\n";
315 if (PredSU->NumSuccsLeft == 0) {
316 PredSU->isAvailable = true;
317 AvailableQueue->push(PredSU);
321 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
322 /// count of its predecessors. If a predecessor pending count is zero, add it to
323 /// the Available queue.
324 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
325 DOUT << "*** Scheduling [" << CurCycle << "]: ";
326 DEBUG(SU->dump(&DAG));
327 SU->Cycle = CurCycle;
329 AvailableQueue->ScheduledNode(SU);
331 // Bottom up: release predecessors
332 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
334 ReleasePred(I->Dep, I->isCtrl, CurCycle);
336 // This is a physical register dependency and it's impossible or
337 // expensive to copy the register. Make sure nothing that can
338 // clobber the register is scheduled between the predecessor and the use.
340 if (LiveRegs.insert(I->Reg)) {
341 LiveRegDefs[I->Reg] = I->Dep;
342 LiveRegCycles[I->Reg] = CurCycle;
347 // Release all the implicit physical register defs that are live.
348 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
351 if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
352 LiveRegs.erase(I->Reg);
353 assert(LiveRegDefs[I->Reg] == SU &&
354 "Physical register dependency violated?");
355 LiveRegDefs[I->Reg] = NULL;
356 LiveRegCycles[I->Reg] = 0;
361 SU->isScheduled = true;
364 /// CapturePred - This does the opposite of ReleasePred. Since SU is being
365 /// unscheduled, increase the succ left count of its predecessors. Remove
366 /// them from AvailableQueue if necessary.
367 void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
368 unsigned CycleBound = 0;
369 for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
373 CycleBound = std::max(CycleBound,
374 I->Dep->Cycle + PredSU->Latency);
377 if (PredSU->isAvailable) {
378 PredSU->isAvailable = false;
379 if (!PredSU->isPending)
380 AvailableQueue->remove(PredSU);
383 PredSU->CycleBound = CycleBound;
384 ++PredSU->NumSuccsLeft;
387 /// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
388 /// its predecessors' states to reflect the change.
389 void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
390 DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
391 DEBUG(SU->dump(&DAG));
393 AvailableQueue->UnscheduledNode(SU);
395 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
397 CapturePred(I->Dep, SU, I->isCtrl);
398 if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
399 LiveRegs.erase(I->Reg);
400 assert(LiveRegDefs[I->Reg] == I->Dep &&
401 "Physical register dependency violated?");
402 LiveRegDefs[I->Reg] = NULL;
403 LiveRegCycles[I->Reg] = 0;
407 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
410 if (LiveRegs.insert(I->Reg)) {
411 assert(!LiveRegDefs[I->Reg] &&
412 "Physical register dependency violated?");
413 LiveRegDefs[I->Reg] = SU;
415 if (I->Dep->Cycle < LiveRegCycles[I->Reg])
416 LiveRegCycles[I->Reg] = I->Dep->Cycle;
421 SU->isScheduled = false;
422 SU->isAvailable = true;
423 AvailableQueue->push(SU);
426 /// IsReachable - Checks if SU is reachable from TargetSU.
427 bool ScheduleDAGRRList::IsReachable(SUnit *SU, SUnit *TargetSU) {
428 // If insertion of the edge SU->TargetSU would create a cycle
429 // then there is a path from TargetSU to SU.
430 int UpperBound, LowerBound;
431 LowerBound = Node2Index[TargetSU->NodeNum];
432 UpperBound = Node2Index[SU->NodeNum];
433 bool HasLoop = false;
434 // Is Ord(TargetSU) < Ord(SU) ?
435 if (LowerBound < UpperBound) {
437 // There may be a path from TargetSU to SU. Check for it.
438 DFS(TargetSU, UpperBound, HasLoop);
443 /// Allocate - assign the topological index to the node n.
444 inline void ScheduleDAGRRList::Allocate(int n, int index) {
445 Node2Index[n] = index;
446 Index2Node[index] = n;
449 /// InitDAGTopologicalSorting - create the initial topological
450 /// ordering from the DAG to be scheduled.
451 void ScheduleDAGRRList::InitDAGTopologicalSorting() {
452 unsigned DAGSize = SUnits.size();
453 std::vector<unsigned> InDegree(DAGSize);
454 std::vector<SUnit*> WorkList;
455 WorkList.reserve(DAGSize);
456 std::vector<SUnit*> TopOrder;
457 TopOrder.reserve(DAGSize);
459 // Initialize the data structures.
460 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
461 SUnit *SU = &SUnits[i];
462 int NodeNum = SU->NodeNum;
463 unsigned Degree = SU->Succs.size();
464 InDegree[NodeNum] = Degree;
466 // Is it a node without dependencies?
468 assert(SU->Succs.empty() && "SUnit should have no successors");
469 // Collect leaf nodes.
470 WorkList.push_back(SU);
474 while (!WorkList.empty()) {
475 SUnit *SU = WorkList.back();
477 TopOrder.push_back(SU);
478 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
481 if (!--InDegree[SU->NodeNum])
482 // If all dependencies of the node are processed already,
483 // then the node can be computed now.
484 WorkList.push_back(SU);
488 // Second pass, assign the actual topological order as node ids.
493 Index2Node.resize(DAGSize);
494 Node2Index.resize(DAGSize);
495 Visited.resize(DAGSize);
497 for (std::vector<SUnit*>::reverse_iterator TI = TopOrder.rbegin(),
498 TE = TopOrder.rend();TI != TE; ++TI) {
499 Allocate((*TI)->NodeNum, Id);
504 // Check correctness of the ordering
505 for (unsigned i = 0, e = DAGSize; i != e; ++i) {
506 SUnit *SU = &SUnits[i];
507 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
509 assert(Node2Index[SU->NodeNum] > Node2Index[I->Dep->NodeNum] &&
510 "Wrong topological sorting");
516 /// AddPred - adds an edge from SUnit X to SUnit Y.
517 /// Updates the topological ordering if required.
518 bool ScheduleDAGRRList::AddPred(SUnit *Y, SUnit *X, bool isCtrl, bool isSpecial,
519 unsigned PhyReg, int Cost) {
520 int UpperBound, LowerBound;
521 LowerBound = Node2Index[Y->NodeNum];
522 UpperBound = Node2Index[X->NodeNum];
523 bool HasLoop = false;
524 // Is Ord(X) < Ord(Y) ?
525 if (LowerBound < UpperBound) {
526 // Update the topological order.
528 DFS(Y, UpperBound, HasLoop);
529 assert(!HasLoop && "Inserted edge creates a loop!");
530 // Recompute topological indexes.
531 Shift(Visited, LowerBound, UpperBound);
533 // Now really insert the edge.
534 return Y->addPred(X, isCtrl, isSpecial, PhyReg, Cost);
537 /// RemovePred - This removes the specified node N from the predecessors of
538 /// the current node M. Updates the topological ordering if required.
539 bool ScheduleDAGRRList::RemovePred(SUnit *M, SUnit *N,
540 bool isCtrl, bool isSpecial) {
541 // InitDAGTopologicalSorting();
542 return M->removePred(N, isCtrl, isSpecial);
545 /// DFS - Make a DFS traversal to mark all nodes reachable from SU and mark
546 /// all nodes affected by the edge insertion. These nodes will later get new
547 /// topological indexes by means of the Shift method.
548 void ScheduleDAGRRList::DFS(SUnit *SU, int UpperBound, bool& HasLoop) {
549 std::vector<SUnit*> WorkList;
550 WorkList.reserve(SUnits.size());
552 WorkList.push_back(SU);
553 while (!WorkList.empty()) {
554 SU = WorkList.back();
556 Visited.set(SU->NodeNum);
557 for (int I = SU->Succs.size()-1; I >= 0; --I) {
558 int s = SU->Succs[I].Dep->NodeNum;
559 if (Node2Index[s] == UpperBound) {
563 // Visit successors if not already visited and in the affected region.
564 if (!Visited.test(s) && Node2Index[s] < UpperBound) {
565 WorkList.push_back(SU->Succs[I].Dep);
571 /// Shift - Renumber the nodes so that the topological ordering is preserved.
573 void ScheduleDAGRRList::Shift(BitVector& Visited, int LowerBound,
579 for (i = LowerBound; i <= UpperBound; ++i) {
580 // w is node at topological index i.
581 int w = Index2Node[i];
582 if (Visited.test(w)) {
588 Allocate(w, i - shift);
592 for (unsigned j = 0; j < L.size(); ++j) {
593 Allocate(L[j], i - shift);
599 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will create a cycle.
601 bool ScheduleDAGRRList::WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
602 if (IsReachable(TargetSU, SU))
604 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
606 if (I->Cost < 0 && IsReachable(TargetSU, I->Dep))
611 /// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
612 /// BtCycle in order to schedule a specific node. Updates CurCycle to the
613 /// cycle reached after the affected nodes have been unscheduled.
614 void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
615 unsigned &CurCycle) {
617 while (CurCycle > BtCycle) {
618 OldSU = Sequence.back();
620 if (SU->isSucc(OldSU))
621 // Don't try to remove SU from AvailableQueue.
622 SU->isAvailable = false;
623 UnscheduleNodeBottomUp(OldSU);
628 if (SU->isSucc(OldSU)) {
629 assert(false && "Something is wrong!");
636 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
637 /// successors to the newly created node.
638 SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
639 if (SU->FlaggedNodes.size())
642 SDNode *N = SU->Node;
647 bool TryUnfold = false;
648 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
649 MVT::ValueType VT = N->getValueType(i);
652 else if (VT == MVT::Other)
655 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
656 const SDOperand &Op = N->getOperand(i);
657 MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
663 SmallVector<SDNode*, 4> NewNodes;
664 if (!TII->unfoldMemoryOperand(DAG, N, NewNodes))
667 DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
668 assert(NewNodes.size() == 2 && "Expected a load folding node!");
671 SDNode *LoadNode = NewNodes[0];
672 unsigned NumVals = N->getNumValues();
673 unsigned OldNumVals = SU->Node->getNumValues();
674 for (unsigned i = 0; i != NumVals; ++i)
675 DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, i), SDOperand(N, i));
676 DAG.ReplaceAllUsesOfValueWith(SDOperand(SU->Node, OldNumVals-1),
677 SDOperand(LoadNode, 1));
679 SUnit *NewSU = CreateNewSUnit(N);
680 SUnitMap[N].push_back(NewSU);
681 const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
682 for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
683 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
684 NewSU->isTwoAddress = true;
688 if (TID.isCommutable())
689 NewSU->isCommutable = true;
690 // FIXME: Calculate height / depth and propagate the changes?
691 NewSU->Depth = SU->Depth;
692 NewSU->Height = SU->Height;
693 ComputeLatency(NewSU);
695 // LoadNode may already exist. This can happen when there is another
696 // load from the same location that produces the same type of value
697 // but with different alignment or volatility.
698 bool isNewLoad = true;
700 DenseMap<SDNode*, std::vector<SUnit*> >::iterator SMI =
701 SUnitMap.find(LoadNode);
702 if (SMI != SUnitMap.end()) {
703 LoadSU = SMI->second.front();
706 LoadSU = CreateNewSUnit(LoadNode);
707 SUnitMap[LoadNode].push_back(LoadSU);
709 LoadSU->Depth = SU->Depth;
710 LoadSU->Height = SU->Height;
711 ComputeLatency(LoadSU);
714 SUnit *ChainPred = NULL;
715 SmallVector<SDep, 4> ChainSuccs;
716 SmallVector<SDep, 4> LoadPreds;
717 SmallVector<SDep, 4> NodePreds;
718 SmallVector<SDep, 4> NodeSuccs;
719 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
723 else if (I->Dep->Node && I->Dep->Node->isOperandOf(LoadNode))
724 LoadPreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
726 NodePreds.push_back(SDep(I->Dep, I->Reg, I->Cost, false, false));
728 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
731 ChainSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
732 I->isCtrl, I->isSpecial));
734 NodeSuccs.push_back(SDep(I->Dep, I->Reg, I->Cost,
735 I->isCtrl, I->isSpecial));
739 RemovePred(SU, ChainPred, true, false);
741 AddPred(LoadSU, ChainPred, true, false);
743 for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
744 SDep *Pred = &LoadPreds[i];
745 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
747 AddPred(LoadSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
748 Pred->Reg, Pred->Cost);
751 for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
752 SDep *Pred = &NodePreds[i];
753 RemovePred(SU, Pred->Dep, Pred->isCtrl, Pred->isSpecial);
754 AddPred(NewSU, Pred->Dep, Pred->isCtrl, Pred->isSpecial,
755 Pred->Reg, Pred->Cost);
757 for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
758 SDep *Succ = &NodeSuccs[i];
759 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
760 AddPred(Succ->Dep, NewSU, Succ->isCtrl, Succ->isSpecial,
761 Succ->Reg, Succ->Cost);
763 for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
764 SDep *Succ = &ChainSuccs[i];
765 RemovePred(Succ->Dep, SU, Succ->isCtrl, Succ->isSpecial);
767 AddPred(Succ->Dep, LoadSU, Succ->isCtrl, Succ->isSpecial,
768 Succ->Reg, Succ->Cost);
772 AddPred(NewSU, LoadSU, false, false);
776 AvailableQueue->addNode(LoadSU);
777 AvailableQueue->addNode(NewSU);
781 if (NewSU->NumSuccsLeft == 0) {
782 NewSU->isAvailable = true;
788 DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
789 NewSU = CreateClone(SU);
791 // New SUnit has the exact same predecessors.
792 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
795 AddPred(NewSU, I->Dep, I->isCtrl, false, I->Reg, I->Cost);
796 NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
799 // Only copy scheduled successors. Cut them from the old node's successor
800 // list and move them over.
801 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
802 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
806 if (I->Dep->isScheduled) {
807 NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
808 AddPred(I->Dep, NewSU, I->isCtrl, false, I->Reg, I->Cost);
809 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
812 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
813 SUnit *Succ = DelDeps[i].first;
814 bool isCtrl = DelDeps[i].second;
815 RemovePred(Succ, SU, isCtrl, false);
818 AvailableQueue->updateNode(SU);
819 AvailableQueue->addNode(NewSU);
825 /// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
826 /// and move all scheduled successors of the given SUnit to the last copy.
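///
/// Sketch of the resulting structure (illustrative): for a physical register
/// Reg whose value must survive past nodes that clobber it, two copy SUnits
/// are created,
///   CopyFromSU: Reg (SrcRC)  -> virtual register of DestRC
///   CopyToSU:   DestRC value -> Reg (SrcRC)
/// and the already-scheduled successors of SU are rewired to depend on
/// CopyToSU, so they receive the value through the cross-class copies rather
/// than directly from the original physical register def.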
827 void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
828 const TargetRegisterClass *DestRC,
829 const TargetRegisterClass *SrcRC,
830 SmallVector<SUnit*, 2> &Copies) {
831 SUnit *CopyFromSU = CreateNewSUnit(NULL);
832 CopyFromSU->CopySrcRC = SrcRC;
833 CopyFromSU->CopyDstRC = DestRC;
834 CopyFromSU->Depth = SU->Depth;
835 CopyFromSU->Height = SU->Height;
837 SUnit *CopyToSU = CreateNewSUnit(NULL);
838 CopyToSU->CopySrcRC = DestRC;
839 CopyToSU->CopyDstRC = SrcRC;
841 // Only copy scheduled successors. Cut them from the old node's successor
842 // list and move them over.
843 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
844 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
848 if (I->Dep->isScheduled) {
849 CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
850 AddPred(I->Dep, CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
851 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
854 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
855 SUnit *Succ = DelDeps[i].first;
856 bool isCtrl = DelDeps[i].second;
857 RemovePred(Succ, SU, isCtrl, false);
860 AddPred(CopyFromSU, SU, false, false, Reg, -1);
861 AddPred(CopyToSU, CopyFromSU, false, false, Reg, 1);
863 AvailableQueue->updateNode(SU);
864 AvailableQueue->addNode(CopyFromSU);
865 AvailableQueue->addNode(CopyToSU);
866 Copies.push_back(CopyFromSU);
867 Copies.push_back(CopyToSU);
872 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
873 /// definition of the specified node.
874 /// FIXME: Move to SelectionDAG?
875 static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg,
876 const TargetInstrInfo *TII) {
877 const TargetInstrDesc &TID = TII->get(N->getTargetOpcode());
878 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
879 unsigned NumRes = TID.getNumDefs();
880 for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
885 return N->getValueType(NumRes);
888 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
889 /// scheduling of the given node to satisfy live physical register dependencies.
890 /// If the specific node is the last one that's available to schedule, do
891 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
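///
/// Illustrative example: if a condition-code-like physical register is
/// currently "live" because an already-scheduled successor still expects the
/// value produced by a particular definition, a candidate node whose implicit
/// defs would clobber that register is not ready; the interfering registers
/// are reported in LRegs so the caller can backtrack, clone the defining
/// node, or fall back to inserting cross register class copies.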
892 bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
893 SmallVector<unsigned, 4> &LRegs){
894 if (LiveRegs.empty())
897 SmallSet<unsigned, 4> RegAdded;
898 // If this node would clobber any "live" register, then it's not ready.
899 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
902 unsigned Reg = I->Reg;
903 if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
904 if (RegAdded.insert(Reg))
905 LRegs.push_back(Reg);
907 for (const unsigned *Alias = TRI->getAliasSet(Reg);
909 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
910 if (RegAdded.insert(*Alias))
911 LRegs.push_back(*Alias);
916 for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
917 SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
918 if (!Node || !Node->isTargetOpcode())
920 const TargetInstrDesc &TID = TII->get(Node->getTargetOpcode());
921 if (!TID.ImplicitDefs)
923 for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
924 if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
925 if (RegAdded.insert(*Reg))
926 LRegs.push_back(*Reg);
928 for (const unsigned *Alias = TRI->getAliasSet(*Reg);
930 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
931 if (RegAdded.insert(*Alias))
932 LRegs.push_back(*Alias);
936 return !LRegs.empty();
940 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up schedulers.
942 void ScheduleDAGRRList::ListScheduleBottomUp() {
943 unsigned CurCycle = 0;
944 // Add root to Available queue.
945 if (!SUnits.empty()) {
946 SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front();
947 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
948 RootSU->isAvailable = true;
949 AvailableQueue->push(RootSU);
952 // While Available queue is not empty, grab the node with the highest
953 // priority. If it is not ready put it back. Schedule the node.
954 SmallVector<SUnit*, 4> NotReady;
955 while (!AvailableQueue->empty()) {
956 bool Delayed = false;
957 DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
958 SUnit *CurSU = AvailableQueue->pop();
960 if (CurSU->CycleBound <= CurCycle) {
961 SmallVector<unsigned, 4> LRegs;
962 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
965 LRegsMap.insert(std::make_pair(CurSU, LRegs));
968 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
969 NotReady.push_back(CurSU);
970 CurSU = AvailableQueue->pop();
973 // All candidates are delayed due to live physical reg dependencies.
974 // Try backtracking, code duplication, or inserting cross class copies to resolve it.
976 if (Delayed && !CurSU) {
977 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
978 SUnit *TrySU = NotReady[i];
979 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
981 // Try unscheduling up to the point where it's safe to schedule this node.
983 unsigned LiveCycle = CurCycle;
984 for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
985 unsigned Reg = LRegs[j];
986 unsigned LCycle = LiveRegCycles[Reg];
987 LiveCycle = std::min(LiveCycle, LCycle);
989 SUnit *OldSU = Sequence[LiveCycle];
990 if (!WillCreateCycle(TrySU, OldSU)) {
991 BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
992 // Force the current node to be scheduled before the node that
993 // requires the physical reg dep.
994 if (OldSU->isAvailable) {
995 OldSU->isAvailable = false;
996 AvailableQueue->remove(OldSU);
998 AddPred(TrySU, OldSU, true, true);
999 // If one or more successors have been unscheduled, then the current
1000 // node is no longer available. Schedule a successor that's now
1001 // available instead.
1002 if (!TrySU->isAvailable)
1003 CurSU = AvailableQueue->pop();
1006 TrySU->isPending = false;
1007 NotReady.erase(NotReady.begin()+i);
1014 // Can't backtrack. Try duplicating the nodes that produce these
1015 // "expensive to copy" values to break the dependency. In case even
1016 // that doesn't work, insert cross class copies.
1017 SUnit *TrySU = NotReady[0];
1018 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1019 assert(LRegs.size() == 1 && "Can't handle this yet!");
1020 unsigned Reg = LRegs[0];
1021 SUnit *LRDef = LiveRegDefs[Reg];
1022 SUnit *NewDef = CopyAndMoveSuccessors(LRDef);
1024 // Issue expensive cross register class copies.
1025 MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
1026 const TargetRegisterClass *RC =
1027 TRI->getPhysicalRegisterRegClass(Reg, VT);
1028 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1030 assert(false && "Don't know how to copy this physical register!");
1033 SmallVector<SUnit*, 2> Copies;
1034 InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1035 DOUT << "Adding an edge from SU # " << TrySU->NodeNum
1036 << " to SU #" << Copies.front()->NodeNum << "\n";
1037 AddPred(TrySU, Copies.front(), true, true);
1038 NewDef = Copies.back();
1041 DOUT << "Adding an edge from SU # " << NewDef->NodeNum
1042 << " to SU #" << TrySU->NodeNum << "\n";
1043 LiveRegDefs[Reg] = NewDef;
1044 AddPred(NewDef, TrySU, true, true);
1045 TrySU->isAvailable = false;
1050 assert(false && "Unable to resolve live physical register dependencies!");
1055 // Add the nodes that aren't ready back onto the available list.
1056 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
1057 NotReady[i]->isPending = false;
1058 // May no longer be available due to backtracking.
1059 if (NotReady[i]->isAvailable)
1060 AvailableQueue->push(NotReady[i]);
1065 Sequence.push_back(0);
1067 ScheduleNodeBottomUp(CurSU, CurCycle);
1068 Sequence.push_back(CurSU);
1073 // Reverse the order if it is bottom up.
1074 std::reverse(Sequence.begin(), Sequence.end());
1078 // Verify that all SUnits were scheduled.
1079 bool AnyNotSched = false;
1080 unsigned DeadNodes = 0;
1082 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1083 if (!SUnits[i].isScheduled) {
1084 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1089 cerr << "*** List scheduling failed! ***\n";
1090 SUnits[i].dump(&DAG);
1091 cerr << "has not been scheduled!\n";
1094 if (SUnits[i].NumSuccsLeft != 0) {
1096 cerr << "*** List scheduling failed! ***\n";
1097 SUnits[i].dump(&DAG);
1098 cerr << "has successors left!\n";
1102 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1105 assert(!AnyNotSched);
1106 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1107 "The number of nodes scheduled doesn't match the expected number!");
1111 //===----------------------------------------------------------------------===//
1112 // Top-Down Scheduling
1113 //===----------------------------------------------------------------------===//
1115 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
1116 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
1117 void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
1118 unsigned CurCycle) {
1119 // FIXME: the distance between two nodes is not always == the predecessor's
1120 // latency. For example, the reader can very well read the register written
1121 // by the predecessor later than the issue cycle. It also depends on the
1122 // interrupt model (drain vs. freeze).
1123 SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
1125 --SuccSU->NumPredsLeft;
1128 if (SuccSU->NumPredsLeft < 0) {
1129 cerr << "*** List scheduling failed! ***\n";
1131 cerr << " has been released too many times!\n";
1136 if (SuccSU->NumPredsLeft == 0) {
1137 SuccSU->isAvailable = true;
1138 AvailableQueue->push(SuccSU);
1143 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
1144 /// count of its successors. If a successor pending count is zero, add it to
1145 /// the Available queue.
1146 void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
1147 DOUT << "*** Scheduling [" << CurCycle << "]: ";
1148 DEBUG(SU->dump(&DAG));
1149 SU->Cycle = CurCycle;
1151 AvailableQueue->ScheduledNode(SU);
1153 // Top down: release successors
1154 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1156 ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
1157 SU->isScheduled = true;
1160 /// ListScheduleTopDown - The main loop of list scheduling for top-down schedulers.
1162 void ScheduleDAGRRList::ListScheduleTopDown() {
1163 unsigned CurCycle = 0;
1165 // Add all leaves to the Available queue.
1166 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1167 // It is available if it has no predecessors.
1168 if (SUnits[i].Preds.empty()) {
1169 AvailableQueue->push(&SUnits[i]);
1170 SUnits[i].isAvailable = true;
1174 // While Available queue is not empty, grab the node with the highest
1175 // priority. If it is not ready put it back. Schedule the node.
1176 std::vector<SUnit*> NotReady;
1177 while (!AvailableQueue->empty()) {
1178 SUnit *CurSU = AvailableQueue->pop();
1179 while (CurSU && CurSU->CycleBound > CurCycle) {
1180 NotReady.push_back(CurSU);
1181 CurSU = AvailableQueue->pop();
1184 // Add the nodes that aren't ready back onto the available list.
1185 AvailableQueue->push_all(NotReady);
1189 Sequence.push_back(0);
1191 ScheduleNodeTopDown(CurSU, CurCycle);
1192 Sequence.push_back(CurSU);
1199 // Verify that all SUnits were scheduled.
1200 bool AnyNotSched = false;
1201 unsigned DeadNodes = 0;
1203 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
1204 if (!SUnits[i].isScheduled) {
1205 if (SUnits[i].NumPreds == 0 && SUnits[i].NumSuccs == 0) {
1210 cerr << "*** List scheduling failed! ***\n";
1211 SUnits[i].dump(&DAG);
1212 cerr << "has not been scheduled!\n";
1215 if (SUnits[i].NumPredsLeft != 0) {
1217 cerr << "*** List scheduling failed! ***\n";
1218 SUnits[i].dump(&DAG);
1219 cerr << "has predecessors left!\n";
1223 for (unsigned i = 0, e = Sequence.size(); i != e; ++i)
1226 assert(!AnyNotSched);
1227 assert(Sequence.size() + DeadNodes - Noops == SUnits.size() &&
1228 "The number of nodes scheduled doesn't match the expected number!");
1234 //===----------------------------------------------------------------------===//
1235 // RegReductionPriorityQueue Implementation
1236 //===----------------------------------------------------------------------===//
1238 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1239 // to reduce register pressure.
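//
// Illustrative example (not part of the original comments): for the chain
//   a = load ; b = load ; c = add a, b ; d = load ; e = add c, d
// the Sethi-Ullman labeling used below assigns leaves the number 1, and an
// interior node the maximum of its operands' numbers, plus one for each
// additional operand that ties that maximum. Here c gets 2 and e stays at 2;
// scheduling the higher-numbered (register-hungrier) subtree first keeps
// fewer values live at once, which is what the priority queues below
// approximate.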
1243 class RegReductionPriorityQueue;
1245 /// Sorting functions for the Available queue.
1246 struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1247 RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
1248 bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
1249 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1251 bool operator()(const SUnit* left, const SUnit* right) const;
1254 struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1255 RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
1256 td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
1257 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1259 bool operator()(const SUnit* left, const SUnit* right) const;
1261 } // end anonymous namespace
1263 static inline bool isCopyFromLiveIn(const SUnit *SU) {
1264 SDNode *N = SU->Node;
1265 return N && N->getOpcode() == ISD::CopyFromReg &&
1266 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
1271 class VISIBILITY_HIDDEN RegReductionPriorityQueue
1272 : public SchedulingPriorityQueue {
1273 std::set<SUnit*, SF> Queue;
1274 unsigned currentQueueId;
1277 RegReductionPriorityQueue() :
1278 Queue(SF(this)), currentQueueId(0) {}
1280 virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
1281 std::vector<SUnit> &sunits) {}
1283 virtual void addNode(const SUnit *SU) {}
1285 virtual void updateNode(const SUnit *SU) {}
1287 virtual void releaseState() {}
1289 virtual unsigned getNodePriority(const SUnit *SU) const {
1293 unsigned size() const { return Queue.size(); }
1295 bool empty() const { return Queue.empty(); }
1297 void push(SUnit *U) {
1298 assert(!U->NodeQueueId && "Node in the queue already");
1299 U->NodeQueueId = ++currentQueueId;
1303 void push_all(const std::vector<SUnit *> &Nodes) {
1304 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
1309 if (empty()) return NULL;
1310 typename std::set<SUnit*, SF>::iterator i = prior(Queue.end());
1317 void remove(SUnit *SU) {
1318 assert(!Queue.empty() && "Queue is empty!");
1319 size_t RemovedNum = Queue.erase(SU);
1320 assert(RemovedNum > 0 && "Not in queue!");
1321 assert(RemovedNum == 1 && "Multiple times in the queue!");
1322 SU->NodeQueueId = 0;
1327 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
1328 : public RegReductionPriorityQueue<SF> {
1329 // SUnitMap SDNode to SUnit mapping (n -> n).
1330 DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;
1332 // SUnits - The SUnits for the current graph.
1333 const std::vector<SUnit> *SUnits;
1335 // SethiUllmanNumbers - The SethiUllman number for each node.
1336 std::vector<unsigned> SethiUllmanNumbers;
1338 const TargetInstrInfo *TII;
1339 const TargetRegisterInfo *TRI;
1340 ScheduleDAGRRList *scheduleDAG;
1342 explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii,
1343 const TargetRegisterInfo *tri)
1344 : TII(tii), TRI(tri), scheduleDAG(NULL) {}
1346 void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
1347 std::vector<SUnit> &sunits) {
1350 // Add pseudo dependency edges for two-address nodes.
1351 AddPseudoTwoAddrDeps();
1352 // Calculate node priorities.
1353 CalculateSethiUllmanNumbers();
1356 void addNode(const SUnit *SU) {
1357 SethiUllmanNumbers.resize(SUnits->size(), 0);
1358 CalcNodeSethiUllmanNumber(SU);
1361 void updateNode(const SUnit *SU) {
1362 SethiUllmanNumbers[SU->NodeNum] = 0;
1363 CalcNodeSethiUllmanNumber(SU);
1366 void releaseState() {
1368 SethiUllmanNumbers.clear();
1371 unsigned getNodePriority(const SUnit *SU) const {
1372 assert(SU->NodeNum < SethiUllmanNumbers.size());
1373 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1374 if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
1375 // CopyFromReg should be close to its def because it restricts
1376 // allocation choices. But if it is a livein then perhaps we want it
1377 // closer to its uses so it can be coalesced.
1379 else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1380 // CopyToReg should be close to its uses to facilitate coalescing and
1383 else if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
1384 Opc == TargetInstrInfo::INSERT_SUBREG)
1385 // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to
1386 // facilitate coalescing.
1388 else if (SU->NumSuccs == 0)
1389 // If SU does not have a use, i.e. it doesn't produce a value that would
1390 // be consumed (e.g. store), then it terminates a chain of computation.
1391 // Give it a large SethiUllman number so it will be scheduled right
1392 // before its predecessors, so that it doesn't lengthen their live ranges.
1394 else if (SU->NumPreds == 0)
1395 // If SU does not have a def, schedule it close to its uses because it
1396 // does not lengthen any live ranges.
1399 return SethiUllmanNumbers[SU->NodeNum];
1402 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1403 scheduleDAG = scheduleDag;
1407 bool canClobber(const SUnit *SU, const SUnit *Op);
1408 void AddPseudoTwoAddrDeps();
1409 void CalculateSethiUllmanNumbers();
1410 unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
1415 class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
1416 : public RegReductionPriorityQueue<SF> {
1417 // SUnitMap SDNode to SUnit mapping (n -> n).
1418 DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;
1420 // SUnits - The SUnits for the current graph.
1421 const std::vector<SUnit> *SUnits;
1423 // SethiUllmanNumbers - The SethiUllman number for each node.
1424 std::vector<unsigned> SethiUllmanNumbers;
1427 TDRegReductionPriorityQueue() {}
1429 void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
1430 std::vector<SUnit> &sunits) {
1433 // Calculate node priorities.
1434 CalculateSethiUllmanNumbers();
1437 void addNode(const SUnit *SU) {
1438 SethiUllmanNumbers.resize(SUnits->size(), 0);
1439 CalcNodeSethiUllmanNumber(SU);
1442 void updateNode(const SUnit *SU) {
1443 SethiUllmanNumbers[SU->NodeNum] = 0;
1444 CalcNodeSethiUllmanNumber(SU);
1447 void releaseState() {
1449 SethiUllmanNumbers.clear();
1452 unsigned getNodePriority(const SUnit *SU) const {
1453 assert(SU->NodeNum < SethiUllmanNumbers.size());
1454 return SethiUllmanNumbers[SU->NodeNum];
1458 void CalculateSethiUllmanNumbers();
1459 unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
1463 /// closestSucc - Returns the scheduled cycle of the successor which is
1464 /// closest to the current cycle.
1465 static unsigned closestSucc(const SUnit *SU) {
1466 unsigned MaxCycle = 0;
1467 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1469 unsigned Cycle = I->Dep->Cycle;
1470 // If there are a bunch of CopyToRegs stacked up, they should be considered
1471 // to be at the same position.
1472 if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
1473 Cycle = closestSucc(I->Dep)+1;
1474 if (Cycle > MaxCycle)
1480 /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
1481 /// for scratch registers. Live-in operands and live-out results don't count
1482 /// since they are "fixed".
1483 static unsigned calcMaxScratches(const SUnit *SU) {
1484 unsigned Scratches = 0;
1485 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1487 if (I->isCtrl) continue; // ignore chain preds
1488 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
1491 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1493 if (I->isCtrl) continue; // ignore chain succs
1494 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
1501 bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1503 unsigned LPriority = SPQ->getNodePriority(left);
1504 unsigned RPriority = SPQ->getNodePriority(right);
1505 if (LPriority != RPriority)
1506 return LPriority > RPriority;
1508 // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
1509 // e.g.
1510 // t1 = op t2, c1
1511 // t3 = op t4, c2
1512 //
1513 // and the following instructions are both ready.
1514 // t2 = op c3
1515 // t4 = op c4
1516 //
1517 // Then schedule t2 = op first.
1518 // i.e.
1519 // t4 = op c4
1520 // t1 = op t2, c1
1521 // t3 = op t4, c2
1522 // t2 = op c3
1523 //
1524 // This creates more short live intervals.
1525 unsigned LDist = closestSucc(left);
1526 unsigned RDist = closestSucc(right);
1528 return LDist < RDist;
1530 // Intuitively, it's good to push down instructions whose results are
1531 // liveout so their long live ranges won't conflict with other values
1532 // which are needed inside the BB. Further prioritize liveout instructions
1533 // by the number of operands which are calculated within the BB.
1534 unsigned LScratch = calcMaxScratches(left);
1535 unsigned RScratch = calcMaxScratches(right);
1536 if (LScratch != RScratch)
1537 return LScratch > RScratch;
1539 if (left->Height != right->Height)
1540 return left->Height > right->Height;
1542 if (left->Depth != right->Depth)
1543 return left->Depth < right->Depth;
1545 if (left->CycleBound != right->CycleBound)
1546 return left->CycleBound > right->CycleBound;
1548 assert(left->NodeQueueId && right->NodeQueueId &&
1549 "NodeQueueId cannot be zero");
1550 return (left->NodeQueueId > right->NodeQueueId);
1553 template<class SF> bool
1554 BURegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
1555 if (SU->isTwoAddress) {
1556 unsigned Opc = SU->Node->getTargetOpcode();
1557 const TargetInstrDesc &TID = TII->get(Opc);
1558 unsigned NumRes = TID.getNumDefs();
1559 unsigned NumOps = TID.getNumOperands() - NumRes;
1560 for (unsigned i = 0; i != NumOps; ++i) {
1561 if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
1562 SDNode *DU = SU->Node->getOperand(i).Val;
1563 if ((*SUnitMap).find(DU) != (*SUnitMap).end() &&
1564 Op == (*SUnitMap)[DU][SU->InstanceNo])
1573 /// hasCopyToRegUse - Return true if SU has a value successor that is a CopyToReg node.
1575 static bool hasCopyToRegUse(SUnit *SU) {
1576 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1578 if (I->isCtrl) continue;
1579 SUnit *SuccSU = I->Dep;
1580 if (SuccSU->Node && SuccSU->Node->getOpcode() == ISD::CopyToReg)
1586 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
1587 /// physical register defs.
1588 static bool canClobberPhysRegDefs(SUnit *SuccSU, SUnit *SU,
1589 const TargetInstrInfo *TII,
1590 const TargetRegisterInfo *TRI) {
1591 SDNode *N = SuccSU->Node;
1592 unsigned NumDefs = TII->get(N->getTargetOpcode()).getNumDefs();
1593 const unsigned *ImpDefs = TII->get(N->getTargetOpcode()).getImplicitDefs();
1596 const unsigned *SUImpDefs =
1597 TII->get(SU->Node->getTargetOpcode()).getImplicitDefs();
1600 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
1601 MVT::ValueType VT = N->getValueType(i);
1602 if (VT == MVT::Flag || VT == MVT::Other)
1604 unsigned Reg = ImpDefs[i - NumDefs];
1605 for (;*SUImpDefs; ++SUImpDefs) {
1606 unsigned SUReg = *SUImpDefs;
1607 if (TRI->regsOverlap(Reg, SUReg))
1614 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
1615 /// it as a def&use operand, add a pseudo control edge from it to the other
1616 /// node (if it won't create a cycle) so the two-address one will be scheduled
1617 /// first (lower in the schedule). If both nodes are two-address, favor the
1618 /// one that has a CopyToReg use (more likely to be a loop induction update).
1619 /// If both are two-address, but one is commutable while the other is not
1620 /// commutable, favor the one that's not commutable.
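///
/// Illustrative example (hypothetical nodes): given
///   t2 = add t1, x    ; two-address, result tied to t1
///   t3 = sub t1, y
/// both nodes read t1. Making the sub a control-only predecessor of the add
/// places the add below the sub in the final schedule, so the add becomes the
/// last use of t1 and t1/t2 can share a register without an extra copy; the
/// edge is only added when IsReachable proves it cannot create a cycle.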
1622 void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
1623 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1624 SUnit *SU = (SUnit *)&((*SUnits)[i]);
1625 if (!SU->isTwoAddress)
1628 SDNode *Node = SU->Node;
1629 if (!Node || !Node->isTargetOpcode() || SU->FlaggedNodes.size() > 0)
1632 unsigned Opc = Node->getTargetOpcode();
1633 const TargetInstrDesc &TID = TII->get(Opc);
1634 unsigned NumRes = TID.getNumDefs();
1635 unsigned NumOps = TID.getNumOperands() - NumRes;
1636 for (unsigned j = 0; j != NumOps; ++j) {
1637 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) != -1) {
1638 SDNode *DU = SU->Node->getOperand(j).Val;
1639 if ((*SUnitMap).find(DU) == (*SUnitMap).end())
1641 SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo];
1642 if (!DUSU) continue;
1643 for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end();
1645 if (I->isCtrl) continue;
1646 SUnit *SuccSU = I->Dep;
1649 // Be conservative. Ignore if nodes aren't at roughly the same
1650 // depth and height.
1651 if (SuccSU->Height < SU->Height && (SU->Height - SuccSU->Height) > 1)
1653 if (!SuccSU->Node || !SuccSU->Node->isTargetOpcode())
1655 // Don't constrain nodes with physical register defs if the
1656 // predecessor can clobber them.
1657 if (SuccSU->hasPhysRegDefs) {
1658 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
1661 // Don't constrain extract_subreg / insert_subreg; these may be
1662 // coalesced away. We don't want them close to their uses.
1663 unsigned SuccOpc = SuccSU->Node->getTargetOpcode();
1664 if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
1665 SuccOpc == TargetInstrInfo::INSERT_SUBREG)
1667 if ((!canClobber(SuccSU, DUSU) ||
1668 (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
1669 (!SU->isCommutable && SuccSU->isCommutable)) &&
1670 !scheduleDAG->IsReachable(SuccSU, SU)) {
1671 DOUT << "Adding an edge from SU # " << SU->NodeNum
1672 << " to SU #" << SuccSU->NodeNum << "\n";
1673 scheduleDAG->AddPred(SU, SuccSU, true, true);
1681 /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
1682 /// Smaller number is the higher priority.
1684 unsigned BURegReductionPriorityQueue<SF>::
1685 CalcNodeSethiUllmanNumber(const SUnit *SU) {
1686 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
1687 if (SethiUllmanNumber != 0)
1688 return SethiUllmanNumber;
1691 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1693 if (I->isCtrl) continue; // ignore chain preds
1694 SUnit *PredSU = I->Dep;
1695 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
1696 if (PredSethiUllman > SethiUllmanNumber) {
1697 SethiUllmanNumber = PredSethiUllman;
1699 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1703 SethiUllmanNumber += Extra;
1705 if (SethiUllmanNumber == 0)
1706 SethiUllmanNumber = 1;
1708 return SethiUllmanNumber;
1711 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1712 /// scheduling units.
1714 void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
1715 SethiUllmanNumbers.assign(SUnits->size(), 0);
1717 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1718 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
1721 /// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
1722 /// predecessors of the successors of the SUnit SU. Stop when the provided
1723 /// limit is exceeded.
1724 static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
1727 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1729 SUnit *SuccSU = I->Dep;
1730 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1731 EE = SuccSU->Preds.end(); II != EE; ++II) {
1732 SUnit *PredSU = II->Dep;
1733 if (!PredSU->isScheduled)
1743 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1744 unsigned LPriority = SPQ->getNodePriority(left);
1745 unsigned RPriority = SPQ->getNodePriority(right);
1746 bool LIsTarget = left->Node && left->Node->isTargetOpcode();
1747 bool RIsTarget = right->Node && right->Node->isTargetOpcode();
1748 bool LIsFloater = LIsTarget && left->NumPreds == 0;
1749 bool RIsFloater = RIsTarget && right->NumPreds == 0;
1750 unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
1751 unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
1753 if (left->NumSuccs == 0 && right->NumSuccs != 0)
1755 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
1762 if (left->NumSuccs == 1)
1764 if (right->NumSuccs == 1)
1767 if (LPriority+LBonus != RPriority+RBonus)
1768 return LPriority+LBonus < RPriority+RBonus;
1770 if (left->Depth != right->Depth)
1771 return left->Depth < right->Depth;
1773 if (left->NumSuccsLeft != right->NumSuccsLeft)
1774 return left->NumSuccsLeft > right->NumSuccsLeft;
1776 if (left->CycleBound != right->CycleBound)
1777 return left->CycleBound > right->CycleBound;
1779 assert(left->NodeQueueId && right->NodeQueueId &&
1780 "NodeQueueId cannot be zero");
1781 return (left->NodeQueueId > right->NodeQueueId);
1784 /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
1785 /// Smaller number is the higher priority.
1787 unsigned TDRegReductionPriorityQueue<SF>::
1788 CalcNodeSethiUllmanNumber(const SUnit *SU) {
1789 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
1790 if (SethiUllmanNumber != 0)
1791 return SethiUllmanNumber;
1793 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1794 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1795 SethiUllmanNumber = 0xffff;
1796 else if (SU->NumSuccsLeft == 0)
1797 // If SU does not have a use, i.e. it doesn't produce a value that would
1798 // be consumed (e.g. store), then it terminates a chain of computation.
1799 // Give it a small SethiUllman number so it will be scheduled right before
1800 // its predecessors, so that it doesn't lengthen their live ranges.
1801 SethiUllmanNumber = 0;
1802 else if (SU->NumPredsLeft == 0 &&
1803 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
1804 SethiUllmanNumber = 0xffff;
1807 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1809 if (I->isCtrl) continue; // ignore chain preds
1810 SUnit *PredSU = I->Dep;
1811 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
1812 if (PredSethiUllman > SethiUllmanNumber) {
1813 SethiUllmanNumber = PredSethiUllman;
1815 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1819 SethiUllmanNumber += Extra;
1822 return SethiUllmanNumber;
1825 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1826 /// scheduling units.
1828 void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
1829 SethiUllmanNumbers.assign(SUnits->size(), 0);
1831 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1832 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
1835 //===----------------------------------------------------------------------===//
1836 // Public Constructor Functions
1837 //===----------------------------------------------------------------------===//
1839 llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1841 MachineBasicBlock *BB) {
1842 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
1843 const TargetRegisterInfo *TRI = DAG->getTarget().getRegisterInfo();
1845 BURegReductionPriorityQueue<bu_ls_rr_sort> *priorityQueue =
1846 new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII, TRI);
1848 ScheduleDAGRRList * scheduleDAG =
1849 new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true, priorityQueue);
1850 priorityQueue->setScheduleDAG(scheduleDAG);
1854 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1856 MachineBasicBlock *BB) {
1857 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
1858 new TDRegReductionPriorityQueue<td_ls_rr_sort>());