//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
namespace {
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definition
  /// that are "live". These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf,
                    bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAGSDNodes(mf), isBottomUp(isbottomup),
      AvailableQueue(availqueue), Topo(SUnits) {
  }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }
  void Schedule();

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
  void CapturePred(SDep *PredEdge);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Return true, since register-pressure-reducing
  /// scheduling doesn't need actual latency information.
  bool ForceUnitLatencies() const { return true; }
};
}  // end anonymous namespace
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph();

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();
}
//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  --PredSU->NumSuccsLeft;

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (PredSU->NumSuccsLeft == 0) {
    PredSU->isAvailable = true;
    AvailableQueue->push(PredSU);
  }
}
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it
/// to the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  // Bottom up: release predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[I->getReg()] = I->getSUnit();
        LiveRegCycles[I->getReg()] = CurCycle;
      }
    }
  }

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[I->getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[I->getReg()] = NULL;
        LiveRegCycles[I->getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  ++PredSU->NumSuccsLeft;
}
/// UnscheduleNodeBottomUp - Remove the node from the schedule, updating its
/// state and the states of its predecessors to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->getHeight() << "]: ";
  DEBUG(SU->dump(this));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegCycles[I->getReg()] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()]) {
        LiveRegDefs[I->getReg()] = SU;
        ++NumLiveRegs;
      }
      if (I->getSUnit()->getHeight() < LiveRegCycles[I->getReg()])
        LiveRegCycles[I->getReg()] = I->getSUnit()->getHeight();
    }
  }

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node.
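/// For example (an illustrative scenario): if the node to be scheduled
/// needs a physical register that has been live since cycle 3, the nodes
/// scheduled at cycles CurCycle-1 down to 3 are unscheduled, so the node
/// can be placed below the conflicting definition.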
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  ++NumBacktracks;
}
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
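/// If the node folds a load (it produces a chain result), it is first
/// unfolded into a separate load and operation so that only the cheaper
/// non-load part needs to be duplicated; otherwise the node itself is
/// cloned.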
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->getNode()->getFlaggedNode())
    return NULL;

  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    MVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Flag)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value,
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPred = *I;
      else if (I->getSUnit()->getNode() &&
               I->getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      if (isNewLoad)
        AddPred(LoadSU, ChainPred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    AddPred(NewSU, SDep(LoadSU, SDep::Order, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
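/// Roughly, for a value live in a physical register of class SrcRC, this
/// materializes the pair
///   CopyFromSU: copy the value into a register of the cross-copy class
///               DestRC
///   CopyToSU:   copy that value back into a SrcRC register
/// and retargets SU's already-scheduled successors to CopyToSU. (A sketch
/// of the intent; see the caller in ListScheduleBottomUp for how the copies
/// are wired into the dependence graph.)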
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                 const TargetRegisterClass *DestRC,
                                                 const TargetRegisterClass *SrcRC,
                                                 SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
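/// For example, for a hypothetical instruction with one explicit def and
/// implicit defs [EFLAGS, EAX], asking for EAX returns the node's value
/// type at index 2: one explicit result, then EFLAGS, then EAX.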
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register
/// dependencies. If the specified node is the last one that's available to
/// schedule, do whatever is necessary (i.e. backtracking or cloning) to make
/// it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      unsigned Reg = I->getReg();
      if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != I->getSUnit()) {
        if (RegAdded.insert(Reg))
          LRegs.push_back(Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(Reg);
           *Alias; ++Alias)
        if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != I->getSUnit()) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
      if (LiveRegDefs[*Reg] && LiveRegDefs[*Reg] != SU) {
        if (RegAdded.insert(*Reg))
          LRegs.push_back(*Reg);
      }
      for (const unsigned *Alias = TRI->getAliasSet(*Reg);
           *Alias; ++Alias)
        if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
          if (RegAdded.insert(*Alias))
            LRegs.push_back(*Alias);
        }
    }
  }
  return !LRegs.empty();
}
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;
  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          AddPred(TrySU, SDep(OldSU, SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false, /*isArtificial=*/true));
          // If one or more successors has been unscheduled, then the current
          // node is no longer available. Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();
          else {
            CurSU = TrySU;
            TrySU->isPending = false;
            NotReady.erase(NotReady.begin()+i);
          }
          break;
        }
      }

      if (!CurSU) {
        // Can't backtrack. If it's too expensive to copy the value, then try
        // to duplicate the nodes that produce these "too expensive to copy"
        // values to break the dependency. In case even that doesn't work,
        // insert cross class copies.
        // If it's not too expensive, i.e. cost != -1, issue copies.
        SUnit *TrySU = NotReady[0];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(Reg, VT);
        const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

        // If cross copy register class is null, then it must be possible to
        // copy the value directly. Do not try to duplicate the def.
        SUnit *NewDef = 0;
        if (DestRC)
          NewDef = CopyAndMoveSuccessors(LRDef);
        else
          DestRC = RC;
        if (!NewDef) {
          // Issue copies, these can be expensive cross register class copies.
          SmallVector<SUnit*, 2> Copies;
          InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DOUT << "Adding an edge from SU #" << TrySU->NodeNum
               << " to SU #" << Copies.front()->NodeNum << "\n";
          AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false,
                              /*isArtificial=*/true));
          NewDef = Copies.back();
        }

        DOUT << "Adding an edge from SU #" << NewDef->NodeNum
             << " to SU #" << TrySU->NodeNum << "\n";
        LiveRegDefs[Reg] = NewDef;
        AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                             /*Reg=*/0, /*isNormalMemory=*/false,
                             /*isMustAlias=*/false,
                             /*isArtificial=*/true));
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      assert(CurSU && "Unable to resolve live physical register dependencies!");
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}
//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//
/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    assert(0);
  }
#endif

  if (SuccSU->NumPredsLeft == 0) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);
  Sequence.push_back(SU);

  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    assert(!I->isAssignedRegDep() &&
           "The list-tdrr scheduler doesn't yet support physreg dependencies!");

    ReleaseSucc(SU, &*I);
  }

  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();

    if (CurSU)
      ScheduleNodeTopDown(CurSU, CurCycle);
    ++CurCycle;
  }

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}
//===----------------------------------------------------------------------===//
//  RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
// to reduce register pressure.
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace
static inline bool isCopyFromLiveIn(const SUnit *SU) {
  SDNode *N = SU->getNode();
  return N && N->getOpcode() == ISD::CopyFromReg &&
         N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
}
/// CalcNodeSethiUllmanNumber - Compute Sethi-Ullman number.
/// The smaller the number, the higher the priority.
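/// A hand-worked example of the recurrence below: leaves get number 1; a
/// node inherits the maximum of its predecessors' numbers, plus one for
/// each additional predecessor that ties the maximum (each tying subtree
/// needs one more register to hold its result). So an op with two leaf
/// operands gets 2, and an op combining two such ops gets 3.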
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl())
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
namespace {
  template<class SF>
  class VISIBILITY_HIDDEN RegReductionPriorityQueue
   : public SchedulingPriorityQueue {
    PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
    unsigned currentQueueId;

  public:
    // SUnits - The SUnits for the current graph.
    std::vector<SUnit> *SUnits;

    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    ScheduleDAGRRList *scheduleDAG;

  protected:
    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

  public:
    RegReductionPriorityQueue(const TargetInstrInfo *tii,
                              const TargetRegisterInfo *tri) :
    Queue(SF(this)), currentQueueId(0),
    TII(tii), TRI(tri), scheduleDAG(NULL) {}

    void initNodes(std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      unsigned SUSize = SethiUllmanNumbers.size();
      if (SUnits->size() > SUSize)
        SethiUllmanNumbers.resize(SUSize*2, 0);
      CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }
    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
      if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
        // CopyFromReg should be close to its def because it restricts
        // allocation choices. But if it is a livein then perhaps we want it
        // closer to its uses so it can be coalesced.
        return 0xffff;
      if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
          Opc == TargetInstrInfo::INSERT_SUBREG)
        // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to
        // facilitate coalescing.
        return 0;
      if (SU->NumSuccs == 0)
        // If SU does not have a use, i.e. it doesn't produce a value that
        // would be consumed (e.g. store), then it terminates a chain of
        // computation. Give it a large SethiUllman number so it will be
        // scheduled right before its predecessors, so that it doesn't
        // lengthen their live ranges.
        return 0xffff;
      if (SU->NumPreds == 0)
        // If SU does not have a def, schedule it close to its uses because it
        // does not lengthen any live ranges.
        return 0;
      return SethiUllmanNumbers[SU->NodeNum];
    }
    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      assert(!U->NodeQueueId && "Node in the queue already");
      U->NodeQueueId = ++currentQueueId;
      Queue.push(U);
    }

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        push(Nodes[i]);
    }

    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.top();
      Queue.pop();
      V->NodeQueueId = 0;
      return V;
    }

    void remove(SUnit *SU) {
      assert(!Queue.empty() && "Queue is empty!");
      assert(SU->NodeQueueId != 0 && "Not in queue!");
      Queue.erase_one(SU);
      SU->NodeQueueId = 0;
    }

    void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
      scheduleDAG = scheduleDag;
    }

  protected:
    bool canClobber(const SUnit *SU, const SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void CalculateSethiUllmanNumbers();
  };

  typedef RegReductionPriorityQueue<bu_ls_rr_sort>
    BURegReductionPriorityQueue;

  typedef RegReductionPriorityQueue<td_ls_rr_sort>
    TDRegReductionPriorityQueue;
}  // end anonymous namespace
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be
    // considered to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers. Live-in operands and live-out results don't count
/// since they are "fixed".
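/// For example, an operand computed inside the block (i.e. not live-in via
/// CopyFromReg) needs a scratch register to hold it, while a live-in
/// operand does not; results consumed inside the block rather than copied
/// out via CopyToReg are weighted more heavily.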
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (!I->getSUnit()->getNode() ||
        I->getSUnit()->getNode()->getOpcode() != ISD::CopyFromReg)
      Scratches++;
  }
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    if (!I->getSUnit()->getNode() ||
        I->getSUnit()->getNode()->getOpcode() != ISD::CopyToReg)
      Scratches += 10;
  }
  return Scratches;
}
// Bottom up
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority)
    return LPriority > RPriority;

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // Intuitively, it's good to push down instructions whose results are
  // liveout so their long live ranges won't conflict with other values
  // which are needed inside the BB. Further prioritize liveout instructions
  // by the number of operands which are calculated within the BB.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  if (left->getHeight() != right->getHeight())
    return left->getHeight() > right->getHeight();

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
template<class SF>
bool
RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
static bool hasCopyToRegUse(const SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
      return true;
  }
  return false;
}
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  const unsigned *SUImpDefs =
    TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  if (!SUImpDefs)
    return false;
  for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned Reg = ImpDefs[i - NumDefs];
    for (;*SUImpDefs; ++SUImpDefs) {
      unsigned SUReg = *SUImpDefs;
      if (TRI->regsOverlap(Reg, SUReg))
        return true;
    }
  }
  return false;
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
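/// Illustrative example: suppose SU is a two-address node "a = add a, b"
/// (the add overwrites the register holding a), and another node B also
/// reads a. Making B a predecessor of the add places B above the add in
/// the final order, so B reads a before the add clobbers it.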
template<class SF>
void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getFlaggedNode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain extract_subreg / insert_subreg; these may be
        // coalesced away. We don't want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
            SuccOpc == TargetInstrInfo::INSERT_SUBREG)
          continue;
        if ((!canClobber(SuccSU, DUSU) ||
             (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DOUT << "Adding a pseudo-two-addr edge from SU # " << SU->NodeNum
               << " to SU #" << SuccSU->NodeNum << "\n";
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
template<class SF>
void RegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU. Stop when the provided
/// limit is exceeded.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    const SUnit *SuccSU = I->getSUnit();
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->getSUnit();
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  }
  return Sum;
}
// Top down
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
  bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus != RPriority+RBonus)
    return LPriority+LBonus < RPriority+RBonus;

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//
llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                                    bool Fast) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(*IS->MF, true, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}
llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
                                                    bool Fast) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  TDRegReductionPriorityQueue *PQ = new TDRegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(*IS->MF, false, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}