//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-RA-sched"
#include "llvm/CodeGen/ScheduleDAGSDNodes.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/CommandLine.h"
using namespace llvm;
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that modify
  /// the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(SelectionDAG *dag, MachineBasicBlock *bb,
                    const TargetMachine &tm, bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAGSDNodes(dag, bb, tm), isBottomUp(isbottomup),
      AvailableQueue(availqueue), Topo(SUnits) {
  }
  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
  /// will create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }
  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, SDep *PredEdge);
  void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
  void CapturePred(SDep *PredEdge);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }
  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }
  /// ForceUnitLatencies - Return true, since register-pressure-reducing
  /// scheduling doesn't need actual latency information.
  bool ForceUnitLatencies() const { return true; }
};
}  // end anonymous namespace
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();
}
//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  --PredSU->NumSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (PredSU->NumSuccsLeft == 0) {
    PredSU->isAvailable = true;
    AvailableQueue->push(PredSU);
  }
}
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it
/// to the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  // Bottom up: release predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[I->getReg()] = I->getSUnit();
        LiveRegCycles[I->getReg()] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[I->getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[I->getReg()] = NULL;
        LiveRegCycles[I->getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succs-left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  ++PredSU->NumSuccsLeft;
}
/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// and its predecessors' state to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->getHeight() << "]: ";
  DEBUG(SU->dump(this));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegCycles[I->getReg()] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()]) {
        LiveRegDefs[I->getReg()] = SU;
        ++NumLiveRegs;
      }
      if (I->getSUnit()->getHeight() < LiveRegCycles[I->getReg()])
        LiveRegCycles[I->getReg()] = I->getSUnit()->getHeight();
    }
  }

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node, unscheduling each node in
/// the sequence along the way.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }

  if (SU->isSucc(OldSU)) {
    assert(false && "Something is wrong!");
    abort();
  }

  ++NumBacktracks;
}
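// Illustrative note (added commentary, not from the original source):
// backtracking is invoked from ListScheduleBottomUp when every available node
// would clobber a physical register that is still live. If an
// already-scheduled node OldSU holds a physreg live and the only schedulable
// node TrySU must also define that register, the scheduler rewinds Sequence
// back to the cycle where the register became live, unscheduling each node
// via UnscheduleNodeBottomUp, so TrySU can be scheduled ahead of OldSU.
// NumBacktracks above counts these rewinds.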
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->getNode()->getFlaggedNode())
    return NULL;

  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    MVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Flag)
      return NULL;
  }
  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));
    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
      ComputeLatency(LoadSU);
    }
    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    ComputeLatency(NewSU);
    // Record all the edges to and from the old SU, by category.
    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPred = *I;
      else if (I->getSUnit()->getNode() &&
               I->getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }
    // Now assign edges to the newly-created nodes.
    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }
    if (isNewLoad)
      AddPred(NewSU, SDep(LoadSU, SDep::Order, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }
  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}
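// Illustrative example (hypothetical instructions, assuming x86-style load
// folding): a node like "t = ADD32rm r, [mem]" may be split by
// unfoldMemoryOperand into a load "tmp = MOV32rm [mem]" followed by
// "t = ADD32rr r, tmp". The unfold path above gives the load SUnit the chain
// edges and the arithmetic SUnit the value edges. The duplication path at the
// end instead clones SU outright and moves its scheduled successors to the
// clone, at the cost of redundant computation (counted by NumDups).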
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
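// For illustration (hypothetical register set, added commentary): if N has
// one explicit def (value 0) and implicit defs {EAX, EFLAGS}, its result list
// is laid out as (explicit defs..., implicit defs...). Asking for EFLAGS
// starts NumRes at 1, advances past EAX to 2, and returns
// N->getValueType(2) -- the value type of the EFLAGS result.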
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register
/// dependencies. If the specified node is the last one that's available to
/// schedule, do whatever is necessary (i.e. backtracking or cloning) to make
/// it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      unsigned Reg = I->getReg();
      if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != I->getSUnit()) {
        if (RegAdded.insert(Reg))
          LRegs.push_back(Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != I->getSUnit()) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegDefs[*Reg] && LiveRegDefs[*Reg] != SU) {
        if (RegAdded.insert(*Reg))
          LRegs.push_back(*Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }
  return !LRegs.empty();
}
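// Illustrative scenario (added commentary): with bottom-up scheduling,
// suppose a compare's condition-register result (e.g. EFLAGS on x86) is live
// because its consuming branch has already been scheduled. Any other node
// popped from the queue whose implicit defs include that register would
// clobber the live value, so it is reported in LRegs and parked in NotReady
// by the caller until the interference is resolved.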
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }
    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          AddPred(TrySU, SDep(OldSU, SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false, /*isArtificial=*/true));
          // If one or more successors has been unscheduled, then the current
          // node is no longer available. Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();
          else {
            CurSU = TrySU;
            TrySU->isPending = false;
            NotReady.erase(NotReady.begin()+i);
          }
          break;
        }
      }
      if (!CurSU) {
        // Can't backtrack. If it's too expensive to copy the value, then try
        // to duplicate the nodes that produce these "too expensive to copy"
        // values to break the dependency. In case even that doesn't work,
        // insert cross class copies.
        // If it's not too expensive, i.e. cost != -1, issue copies.
        SUnit *TrySU = NotReady[0];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(Reg, VT);
        const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

        // If the cross-copy register class is null, then it must be possible
        // to copy the value directly. Do not try to duplicate the def.
        SUnit *NewDef = 0;
        if (DestRC)
          NewDef = CopyAndMoveSuccessors(LRDef);
        else
          DestRC = RC;
        if (!NewDef) {
          // Issue copies, these can be expensive cross register class copies.
          SmallVector<SUnit*, 2> Copies;
          InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DOUT << "Adding an edge from SU #" << TrySU->NodeNum
               << " to SU #" << Copies.front()->NodeNum << "\n";
          AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false,
                              /*isArtificial=*/true));
          NewDef = Copies.back();
        }

        DOUT << "Adding an edge from SU #" << NewDef->NodeNum
             << " to SU #" << TrySU->NodeNum << "\n";
        LiveRegDefs[Reg] = NewDef;
        AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                             /*Reg=*/0, /*isNormalMemory=*/false,
                             /*isMustAlias=*/false,
                             /*isArtificial=*/true));
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      if (!CurSU) {
        assert(false && "Unable to resolve live physical register dependencies!");
        abort();
      }
    }
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}
//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (SuccSU->NumPredsLeft == 0) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);
  Sequence.push_back(SU);

  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    assert(!I->isAssignedRegDep() &&
           "The list-tdrr scheduler doesn't yet support physreg dependencies!");

    ReleaseSucc(SU, &*I);
  }

  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();

    if (CurSU)
      ScheduleNodeTopDown(CurSU, CurCycle);
    ++CurCycle;
  }

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}
//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
// to reduce register pressure.
//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace
static inline bool isCopyFromLiveIn(const SUnit *SU) {
  SDNode *N = SU->getNode();
  return N && N->getOpcode() == ISD::CopyFromReg &&
    N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
}
/// CalcNodeSethiUllmanNumber - Compute the Sethi-Ullman number.
/// A smaller number means a higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
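// Worked example (illustration only, added commentary): for (a+b)*(c+d)
// built as a DAG, each leaf has no non-chain predecessors, so its number is
// clamped to 1. Each '+' sees two predecessors with equal numbers (1), taking
// 1 with Extra = 1 for a total of 2. The '*' then gets max(2,2) + 1 = 3.
// Larger numbers mark subtrees that need more registers, which is the
// classic Sethi-Ullman estimate this priority function is built on.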
namespace {
  template<class SF>
  class VISIBILITY_HIDDEN RegReductionPriorityQueue
   : public SchedulingPriorityQueue {
    PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
    unsigned currentQueueId;

  protected:
    // SUnits - The SUnits for the current graph.
    std::vector<SUnit> *SUnits;

    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    ScheduleDAGRRList *scheduleDAG;

    // SethiUllmanNumbers - The Sethi-Ullman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

  public:
    RegReductionPriorityQueue(const TargetInstrInfo *tii,
                              const TargetRegisterInfo *tri) :
      Queue(SF(this)), currentQueueId(0),
      TII(tii), TRI(tri), scheduleDAG(NULL) {}
    void initNodes(std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      unsigned SUSize = SethiUllmanNumbers.size();
      if (SUnits->size() > SUSize)
        SethiUllmanNumbers.resize(SUSize*2, 0);
      CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }
    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
      if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
        // CopyFromReg should be close to its def because it restricts
        // allocation choices. But if it is a livein then perhaps we want it
        // closer to its uses so it can be coalesced.
        return 0xffff;
      if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
          Opc == TargetInstrInfo::INSERT_SUBREG)
        // EXTRACT_SUBREG / INSERT_SUBREG should be close to their uses to
        // facilitate coalescing.
        return 0;
      if (SU->NumSuccs == 0)
        // If SU does not have a use, i.e. it doesn't produce a value that
        // would be consumed (e.g. a store), then it terminates a chain of
        // computation. Give it a large Sethi-Ullman number so it will be
        // scheduled right before its predecessors and won't lengthen their
        // live ranges.
        return 0xffff;
      if (SU->NumPreds == 0)
        // If SU does not have a def, schedule it close to its uses because it
        // does not lengthen any live ranges.
        return 0;
      return SethiUllmanNumbers[SU->NodeNum];
    }
    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      assert(!U->NodeQueueId && "Node in the queue already");
      U->NodeQueueId = ++currentQueueId;
      Queue.push(U);
    }

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        push(Nodes[i]);
    }

    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.top();
      Queue.pop();
      V->NodeQueueId = 0;
      return V;
    }

    void remove(SUnit *SU) {
      assert(!Queue.empty() && "Queue is empty!");
      assert(SU->NodeQueueId != 0 && "Not in queue!");
      Queue.erase_one(SU);
      SU->NodeQueueId = 0;
    }

    void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
      scheduleDAG = scheduleDag;
    }

  protected:
    bool canClobber(const SUnit *SU, const SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void CalculateSethiUllmanNumbers();
  };
  typedef RegReductionPriorityQueue<bu_ls_rr_sort>
    BURegReductionPriorityQueue;

  typedef RegReductionPriorityQueue<td_ls_rr_sort>
    TDRegReductionPriorityQueue;
}  // end anonymous namespace
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be
    // considered to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers. Live-in operands and live-out results don't count
/// since they are "fixed".
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (!I->getSUnit()->getNode() ||
        I->getSUnit()->getNode()->getOpcode() != ISD::CopyFromReg)
      Scratches++;
  }
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    if (!I->getSUnit()->getNode() ||
        I->getSUnit()->getNode()->getOpcode() != ISD::CopyToReg)
      Scratches += 10;
  }
  return Scratches;
}
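// For illustration, using the weights above (added commentary): a node with
// two operands computed inside the block (not CopyFromReg) and one result
// consumed inside the block (not CopyToReg) scores 1 + 1 + 10 = 12. The much
// heavier weight on non-liveout results biases the comparison below toward
// pushing liveout producers down the schedule.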
// Bottom up
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority)
    return LPriority > RPriority;

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t1 = op t2, c1
  // t3 = op t4, c2
  // t2 = op c3
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // Intuitively, it's good to push down instructions whose results are
  // liveout so their long live ranges won't conflict with other values
  // which are needed inside the BB. Further prioritize liveout instructions
  // by the number of operands which are calculated within the BB.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  if (left->getHeight() != right->getHeight())
    return left->getHeight() > right->getHeight();

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
template<class SF>
bool
RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
static bool hasCopyToRegUse(const SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
      return true;
  }
  return false;
}
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  const unsigned *SUImpDefs =
    TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  if (!SUImpDefs)
    return false;
  for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned Reg = ImpDefs[i - NumDefs];
    for (;*SUImpDefs; ++SUImpDefs) {
      unsigned SUReg = *SUImpDefs;
      if (TRI->regsOverlap(Reg, SUReg))
        return true;
    }
  }
  return false;
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
template<class SF>
void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getFlaggedNode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain extract_subreg / insert_subreg; these may be
        // coalesced away. We don't want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
            SuccOpc == TargetInstrInfo::INSERT_SUBREG)
          continue;
        if ((!canClobber(SuccSU, DUSU) ||
             (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DOUT << "Adding a pseudo-two-addr edge from SU # " << SU->NodeNum
               << " to SU #" << SuccSU->NodeNum << "\n";
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}
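// Illustrative example (added commentary): suppose node A computes
// "v1 = add v0, x" as a two-address instruction, so A's def is tied to its
// use of v0, and another node B simply reads v0. If A executed before B, the
// register allocator would need a copy of v0 to keep it alive for B. The
// artificial edge added above makes B a predecessor of A, so plain readers
// of v0 execute first and the tied operand is clobbered last.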
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
template<class SF>
void RegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU. Stop when the provided
/// limit is exceeded.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    const SUnit *SuccSU = I->getSUnit();
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->getSUnit();
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  }
  return Sum;
}
// Top down
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
  bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus != RPriority+RBonus)
    return LPriority+LBonus < RPriority+RBonus;

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//
llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                                    SelectionDAG *DAG,
                                                    const TargetMachine *TM,
                                                    MachineBasicBlock *BB,
                                                    bool) {
  const TargetInstrInfo *TII = TM->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getRegisterInfo();

  BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(DAG, BB, *TM, true, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}
llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
                                                    SelectionDAG *DAG,
                                                    const TargetMachine *TM,
                                                    MachineBasicBlock *BB,
                                                    bool) {
  const TargetInstrInfo *TII = TM->getInstrInfo();
  const TargetRegisterInfo *TRI = TM->getRegisterInfo();

  TDRegReductionPriorityQueue *PQ = new TDRegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(DAG, BB, *TM, false, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}