//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
using namespace llvm;
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;
  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other node that
  /// modifies the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;
public:
  ScheduleDAGRRList(MachineFunction &mf, bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAGSDNodes(mf), isBottomUp(isbottomup),
      AvailableQueue(availqueue), Topo(SUnits) {}

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }
  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
  /// will create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }
  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void CapturePred(SDep *PredEdge);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
  /// ForceUnitLatencies - Return true, since register-pressure-reducing
  /// scheduling doesn't need actual latency information.
  bool ForceUnitLatencies() const { return true; }
};
}  // end anonymous namespace
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();
//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  --PredSU->NumSuccsLeft;

  if (PredSU->NumSuccsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    cerr << " has been released too many times!\n";

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue->push(PredSU);
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // its use.
      if (!LiveRegDefs[I->getReg()]) {
        LiveRegDefs[I->getReg()] = I->getSUnit();
        LiveRegCycles[I->getReg()] = CurCycle;
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it
/// to the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
    if (I->isAssignedRegDep()) {
      if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[I->getReg()] == SU &&
               "Physical register dependency violated?");
        LiveRegDefs[I->getReg()] = NULL;
        LiveRegCycles[I->getReg()] = 0;

  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);

  ++PredSU->NumSuccsLeft;
/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DOUT << "*** Unscheduling [" << SU->getHeight() << "]: ";
  DEBUG(SU->dump(this));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
    if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegCycles[I->getReg()] = 0;

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()]) {
        LiveRegDefs[I->getReg()] = SU;
      if (I->getSUnit()->getHeight() < LiveRegCycles[I->getReg()])
        LiveRegCycles[I->getReg()] = I->getSUnit()->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);

  assert(!SU->isSucc(OldSU) && "Something is wrong!");
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->getNode()->getFlaggedNode())
    return NULL;

  SDNode *N = SU->getNode();

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    MVT VT = Op.getNode()->getValueType(Op.getResNo());

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DOUT << "Unfolding SU # " << SU->NodeNum << "\n";
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
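    // Route uses of the old node's value results to the corresponding results
    // of the unfolded node, and uses of its trailing chain result to the new
    // load's chain (result 1).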
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value,
    // but with different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
      ComputeLatency(LoadSU);
    }
    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    ComputeLatency(NewSU);
    SDep ChainPred;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
      else if (I->getSUnit()->getNode() &&
               I->getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);

    if (ChainPred.getSUnit()) {
      RemovePred(SU, ChainPred);
      AddPred(LoadSU, ChainPred);
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      AddPred(LoadSU, Pred);
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      RemovePred(SuccDep, D);
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      RemovePred(SuccDep, D);

    AddPred(NewSU, SDep(LoadSU, SDep::Order, LoadSU->Latency));

    AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
  DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      DelDeps.push_back(std::make_pair(SuccSU, D));
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;
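  // CopyFromSU moves the value out of the physical register into a register
  // of the cross-class DestRC; CopyToSU copies it back into SrcRC for SU's
  // original consumers, whose edges are redirected to CopyToSU below.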
  // Only copy scheduled successors. Cut them from the old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static MVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
/// CheckForLiveRegDef - Return true and update live register vector if the
/// specified register def of the specified SUnit clobbers any "live" registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != SU) {
    if (RegAdded.insert(Reg)) {
      LRegs.push_back(Reg);

  for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
    if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
      if (RegAdded.insert(*Alias)) {
        LRegs.push_back(*Alias);
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs){
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
    if (I->isAssignedRegDep())
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
        --NumOps;  // Ignore the flag operand.
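      // The loop below walks the INLINEASM operands: operand 0 is the chain
      // and operand 1 is the asm string, so the flag/value groups start at
      // operand 2. Each group is headed by a flag word whose low 3 bits give
      // the operand kind (2 = register def, 6 = early-clobber register def)
      // and whose remaining bits give the number of registers that follow.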
      for (unsigned i = 2; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = Flags >> 3;

        ++i;  // Skip the ID value.
        if ((Flags & 7) == 2 || (Flags & 7) == 6) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);

    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);

  return !LRegs.empty();
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;

  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU, CurCycle);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to resolve it.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          AddPred(TrySU, SDep(OldSU, SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false, /*isArtificial=*/true));
          // If one or more successors have been unscheduled, then the current
          // node is no longer available. Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();

          TrySU->isPending = false;
          NotReady.erase(NotReady.begin()+i);
      // Can't backtrack. If it's too expensive to copy the value, then try
      // duplicating the nodes that produce these "too expensive to copy"
      // values to break the dependency. In case even that doesn't work,
      // insert cross class copies.
      // If it's not too expensive, i.e. cost != -1, issue copies.
      SUnit *TrySU = NotReady[0];
      SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
      assert(LRegs.size() == 1 && "Can't handle this yet!");
      unsigned Reg = LRegs[0];
      SUnit *LRDef = LiveRegDefs[Reg];
      MVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
      const TargetRegisterClass *RC =
        TRI->getPhysicalRegisterRegClass(Reg, VT);
      const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

      // If the cross-copy register class is null, then it must be possible to
      // copy the value directly. Do not try to duplicate the def.
      SUnit *NewDef = 0;
      if (DestRC)
        NewDef = CopyAndMoveSuccessors(LRDef);
      else
        DestRC = RC;

      if (!NewDef) {
        // Issue copies; these can be expensive cross register class copies.
        SmallVector<SUnit*, 2> Copies;
        InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
        DOUT << "Adding an edge from SU #" << TrySU->NodeNum
             << " to SU #" << Copies.front()->NodeNum << "\n";
        AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                            /*Reg=*/0, /*isNormalMemory=*/false,
                            /*isMustAlias=*/false,
                            /*isArtificial=*/true));
        NewDef = Copies.back();
      }

      DOUT << "Adding an edge from SU #" << NewDef->NodeNum
           << " to SU #" << TrySU->NodeNum << "\n";
      LiveRegDefs[Reg] = NewDef;
      AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                           /*Reg=*/0, /*isNormalMemory=*/false,
                           /*isMustAlias=*/false,
                           /*isArtificial=*/true));
      TrySU->isAvailable = false;
    }

    assert(CurSU && "Unable to resolve live physical register dependencies!");
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

  VerifySchedule(isBottomUp);
//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    cerr << " has been released too many times!\n";

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
  // Top down: release successors.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
    assert(!I->isAssignedRegDep() &&
           "The list-tdrr scheduler doesn't yet support physreg dependencies!");
    ReleaseSucc(SU, &*I);

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();

    if (CurSU)
      ScheduleNodeTopDown(CurSU, CurCycle);

  VerifySchedule(isBottomUp);
//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
// to reduce register pressure.
//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace
/// CalcNodeSethiUllmanNumber - Compute Sethi-Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;
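  // Sethi-Ullman labeling: a node's number is the maximum of its (non-chain)
  // predecessors' numbers, plus one for each additional predecessor that ties
  // that maximum; nodes with no register predecessors get 1.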
  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;

  SethiUllmanNumber += Extra;
  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
template<class SF>
class VISIBILITY_HIDDEN RegReductionPriorityQueue
    : public SchedulingPriorityQueue {
  PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
  unsigned currentQueueId;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

public:
  RegReductionPriorityQueue(const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri) :
    Queue(SF(this)), currentQueueId(0),
    TII(tii), TRI(tri), scheduleDAG(NULL) {}
  void initNodes(std::vector<SUnit> &sunits) {
    SUnits = &sunits;
    // Add pseudo dependency edges for two-address nodes.
    AddPseudoTwoAddrDeps();
    // Calculate node priorities.
    CalculateSethiUllmanNumbers();
  }

  void addNode(const SUnit *SU) {
    unsigned SUSize = SethiUllmanNumbers.size();
    if (SUnits->size() > SUSize)
      SethiUllmanNumbers.resize(SUSize*2, 0);
    CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
  }

  void updateNode(const SUnit *SU) {
    SethiUllmanNumbers[SU->NodeNum] = 0;
    CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
  }

  void releaseState() {
    SethiUllmanNumbers.clear();
  }

  unsigned getNodePriority(const SUnit *SU) const {
    assert(SU->NodeNum < SethiUllmanNumbers.size());
    unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
    if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
      // CopyToReg should be close to its uses to facilitate coalescing and
      // avoid spilling.
      return 0;
    if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
        Opc == TargetInstrInfo::INSERT_SUBREG)
      // EXTRACT_SUBREG / INSERT_SUBREG should be close to its use to
      // facilitate coalescing.
      return 0;
    if (SU->NumSuccs == 0 && SU->NumPreds != 0)
      // If SU does not have a register use, i.e. it doesn't produce a value
      // that would be consumed (e.g. store), then it terminates a chain of
      // computation. Give it a large SethiUllman number so it will be
      // scheduled right before its predecessors and does not lengthen their
      // live ranges.
      return 0xffff;
    if (SU->NumPreds == 0 && SU->NumSuccs != 0)
      // If SU does not have a register def, schedule it close to its uses
      // because it does not lengthen any live ranges.
      return 0;
    return SethiUllmanNumbers[SU->NodeNum];
  }
  unsigned size() const { return Queue.size(); }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++currentQueueId;
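    // A nonzero NodeQueueId marks the node as being in the queue; it is also
    // used as a final FIFO tie-breaker by the sort functors below.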
  void push_all(const std::vector<SUnit *> &Nodes) {
    for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
      push(Nodes[i]);
  }

  SUnit *pop() {
    if (empty()) return NULL;
    SUnit *V = Queue.top();
    Queue.pop();
    V->NodeQueueId = 0;
    return V;
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    Queue.erase_one(SU);
    SU->NodeQueueId = 0;
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void CalculateSethiUllmanNumbers();
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
  BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<td_ls_rr_sort>
  TDRegReductionPriorityQueue;
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be
    // considered to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  return MaxHeight;
}

/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
    if (I->isCtrl()) continue;  // ignore chain preds
    ++Scratches;
  return Scratches;
}
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority)
    return LPriority > RPriority;

  // Try to schedule def + use closer together when the Sethi-Ullman numbers
  // are the same: when two defs and their uses are all ready, scheduling the
  // def whose use is closer first creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  if (left->getHeight() != right->getHeight())
    return left->getHeight() > right->getHeight();

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
template<class SF>
bool
RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
static bool hasCopyToRegUse(const SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
      return true;
  return false;
}

/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  const unsigned *SUImpDefs =
    TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
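  // Result values past the explicit defs correspond, in order, to the
  // implicit defs in ImpDefs; SU clobbers SuccSU if any such value that is
  // actually used overlaps one of SU's implicit defs.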
  for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
    MVT VT = N->getValueType(i);
    if (VT == MVT::Flag || VT == MVT::Other)
      continue;
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned Reg = ImpDefs[i - NumDefs];
    for (;*SUImpDefs; ++SUImpDefs) {
      unsigned SUReg = *SUImpDefs;
      if (TRI->regsOverlap(Reg, SUReg))
        return true;
    }
  }
  return false;
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
template<class SF>
void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getFlaggedNode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain extract_subreg / insert_subreg; these may be
        // coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
            SuccOpc == TargetInstrInfo::INSERT_SUBREG)
          continue;
        if ((!canClobber(SuccSU, DUSU) ||
             (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DOUT << "Adding a pseudo-two-addr edge from SU # " << SU->NodeNum
               << " to SU #" << SuccSU->NodeNum << "\n";
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
template<class SF>
void RegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessor counts of the successors of the SUnit SU. Stop when the
/// provided limit is exceeded.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
    const SUnit *SuccSU = I->getSUnit();
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->getSUnit();
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  return Sum;
}
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
  bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus != RPriority+RBonus)
    return LPriority+LBonus < RPriority+RBonus;

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, bool) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(*IS->MF, true, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, bool) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  TDRegReductionPriorityQueue *PQ = new TDRegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(*IS->MF, false, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}