1 //===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Evan Cheng and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "pre-RA-sched"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/SchedulerRegistry.h"
21 #include "llvm/CodeGen/SSARegMap.h"
22 #include "llvm/Target/MRegisterInfo.h"
23 #include "llvm/Target/TargetData.h"
24 #include "llvm/Target/TargetMachine.h"
25 #include "llvm/Target/TargetInstrInfo.h"
26 #include "llvm/Support/Debug.h"
27 #include "llvm/Support/Compiler.h"
28 #include "llvm/ADT/SmallPtrSet.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/ADT/Statistic.h"
33 #include "llvm/Support/CommandLine.h"
// NOTE(review): this dump embeds original line numbers and elides lines
// (interior numbering jumps), so braces/statements may be missing from view.
// Counters reported by -stats for backtracking, node duplication, and
// cross-register-class copy insertion (all used by the bottom-up scheduler).
36 STATISTIC(NumBacktracks, "Number of times scheduler backtraced");
37 STATISTIC(NumDups, "Number of duplicated nodes");
38 STATISTIC(NumCCCopies, "Number of cross class copies");
// Register the two register-pressure-reduction schedulers with the
// SchedulerRegistry so they can be selected by name on the command line.
40 static RegisterScheduler
41 burrListDAGScheduler("list-burr",
42 " Bottom-up register reduction list scheduling",
43 createBURRListDAGScheduler);
44 static RegisterScheduler
45 tdrListrDAGScheduler("list-tdrr",
46 " Top-down register reduction list scheduling",
47 createTDRRListDAGScheduler);
50 //===----------------------------------------------------------------------===//
51 /// ScheduleDAGRRList - The actual register reduction list scheduler
52 /// implementation. This supports both top-down and bottom-up scheduling.
54 class VISIBILITY_HIDDEN ScheduleDAGRRList : public ScheduleDAG {
56 /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
60 /// AvailableQueue - The priority queue to use for the available SUnits.
62 SchedulingPriorityQueue *AvailableQueue;
64 /// LiveRegs / LiveRegDefs - A set of physical registers and their definition
65 /// that are "live". These nodes must be scheduled before any other nodes that
66 /// modifies the registers can be scheduled.
67 SmallSet<unsigned, 4> LiveRegs;
68 std::vector<SUnit*> LiveRegDefs;
// LiveRegCycles - the cycle at which each live physical-register def was
// scheduled; used to decide when a reg goes dead and where to backtrack to.
69 std::vector<unsigned> LiveRegCycles;
// Takes ownership of availqueue (deleted in the destructor below).
72 ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
73 const TargetMachine &tm, bool isbottomup,
74 SchedulingPriorityQueue *availqueue)
75 : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
76 AvailableQueue(availqueue) {
79 ~ScheduleDAGRRList() {
80 delete AvailableQueue;
// Private scheduling helpers, implemented further down in this file.
86 void ReleasePred(SUnit*, bool, unsigned);
87 void ReleaseSucc(SUnit*, bool isChain, unsigned);
88 void CapturePred(SUnit*, SUnit*, bool);
89 void ScheduleNodeBottomUp(SUnit*, unsigned);
90 void ScheduleNodeTopDown(SUnit*, unsigned);
91 void UnscheduleNodeBottomUp(SUnit*);
92 void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
93 SUnit *CopyAndMoveSuccessors(SUnit*);
94 void InsertCCCopiesAndMoveSuccs(SUnit*, unsigned,
95 const TargetRegisterClass*,
96 const TargetRegisterClass*,
97 SmallVector<SUnit*, 2>&);
98 bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
99 void ListScheduleTopDown();
100 void ListScheduleBottomUp();
101 void CommuteNodesToReducePressure();
103 } // end anonymous namespace
106 /// Schedule - Schedule the DAG using list scheduling.
107 void ScheduleDAGRRList::Schedule() {
108 DOUT << "********** List Scheduling **********\n";
// One live-def / live-cycle slot per physical register.
110 LiveRegDefs.resize(MRI->getNumRegs(), NULL);
111 LiveRegCycles.resize(MRI->getNumRegs(), 0);
113 // Build scheduling units.
116 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
117 SUnits[su].dumpAll(&DAG));
// Let the priority queue see the full SUnit graph before scheduling starts.
121 AvailableQueue->initNodes(SUnitMap, SUnits);
123 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
125 ListScheduleBottomUp();
127 ListScheduleTopDown();
129 AvailableQueue->releaseState();
// Post-pass: mark commutable two-address nodes for commuting to reduce
// register pressure when the schedule is emitted.
131 CommuteNodesToReducePressure();
133 DOUT << "*** Final schedule ***\n";
134 DEBUG(dumpSchedule());
137 // Emit in scheduled order
141 /// CommuteNodesToReducePressure - If a node is two-address and commutable, and
142 /// it is not the last use of its first operand, add it to the CommuteSet if
143 /// possible. It will be commuted when it is translated to a MI.
144 void ScheduleDAGRRList::CommuteNodesToReducePressure() {
// OperandSeen tracks SUnits whose value has already been used by a
// later-in-sequence node; walk the schedule bottom-up (last node first).
145 SmallPtrSet<SUnit*, 4> OperandSeen;
146 for (unsigned i = Sequence.size()-1; i != 0; --i) { // Ignore first node.
147 SUnit *SU = Sequence[i];
148 if (!SU || !SU->Node) continue;
149 if (SU->isCommutable) {
150 unsigned Opc = SU->Node->getTargetOpcode();
151 unsigned NumRes = TII->getNumDefs(Opc);
152 unsigned NumOps = CountOperands(SU->Node);
// Look for the operand tied to a def (TIED_TO constraint).
153 for (unsigned j = 0; j != NumOps; ++j) {
154 if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) == -1)
157 SDNode *OpN = SU->Node->getOperand(j).Val;
158 SUnit *OpSU = SUnitMap[OpN][SU->InstanceNo];
159 if (OpSU && OperandSeen.count(OpSU) == 1) {
160 // Ok, so SU is not the last use of OpSU, but SU is two-address so
161 // it will clobber OpSU. Try to commute SU if no other source operands
163 bool DoCommute = true;
164 for (unsigned k = 0; k < NumOps; ++k) {
166 OpN = SU->Node->getOperand(k).Val;
167 OpSU = SUnitMap[OpN][SU->InstanceNo];
168 if (OpSU && OperandSeen.count(OpSU) == 1) {
// Record the node; the actual commuting happens at MI emission time.
175 CommuteSet.insert(SU->Node);
178 // Only look at the first use&def node for now.
// Mark this node's data predecessors as "seen" for earlier iterations.
183 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
186 OperandSeen.insert(I->Dep);
191 //===----------------------------------------------------------------------===//
192 // Bottom-Up Scheduling
193 //===----------------------------------------------------------------------===//
195 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
196 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
197 void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
199 // FIXME: the distance between two nodes is not always == the predecessor's
200 // latency. For example, the reader can very well read the register written
201 // by the predecessor later than the issue cycle. It also depends on the
202 // interrupt model (drain vs. freeze).
203 PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);
205 --PredSU->NumSuccsLeft;
// Sanity check: a negative count means a successor was released twice.
208 if (PredSU->NumSuccsLeft < 0) {
209 cerr << "*** List scheduling failed! ***\n";
211 cerr << " has been released too many times!\n";
216 if (PredSU->NumSuccsLeft == 0) {
217 // EntryToken has to go last! Special case it here.
218 if (!PredSU->Node || PredSU->Node->getOpcode() != ISD::EntryToken) {
219 PredSU->isAvailable = true;
220 AvailableQueue->push(PredSU);
225 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
226 /// count of its predecessors. If a predecessor pending count is zero, add it to
227 /// the Available queue.
228 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
229 DOUT << "*** Scheduling [" << CurCycle << "]: ";
230 DEBUG(SU->dump(&DAG));
231 SU->Cycle = CurCycle;
233 AvailableQueue->ScheduledNode(SU);
235 // Bottom up: release predecessors
236 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
238 ReleasePred(I->Dep, I->isCtrl, CurCycle);
240 // This is a physical register dependency and it's impossible or
241 // expensive to copy the register. Make sure nothing that can
242 // clobber the register is scheduled between the predecessor and
// insert() returning true means this reg was not already live; record
// which SUnit defines it and the cycle at which it became live.
244 if (LiveRegs.insert(I->Reg)) {
245 LiveRegDefs[I->Reg] = I->Dep;
246 LiveRegCycles[I->Reg] = CurCycle;
251 // Release all the implicit physical register defs that are live.
252 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
// Only kill the liveness entry if this def is the one that made it live.
255 if (LiveRegCycles[I->Reg] == I->Dep->Cycle) {
256 LiveRegs.erase(I->Reg);
257 assert(LiveRegDefs[I->Reg] == SU &&
258 "Physical register dependency violated?");
259 LiveRegDefs[I->Reg] = NULL;
260 LiveRegCycles[I->Reg] = 0;
265 SU->isScheduled = true;
268 /// CapturePred - This does the opposite of ReleasePred. Since SU is being
269 /// unscheduled, incrcease the succ left count of its predecessors. Remove
270 /// them from AvailableQueue if necessary.
271 void ScheduleDAGRRList::CapturePred(SUnit *PredSU, SUnit *SU, bool isChain) {
// Recompute the pred's cycle bound from its still-scheduled successors.
272 PredSU->CycleBound = 0;
273 for (SUnit::succ_iterator I = PredSU->Succs.begin(), E = PredSU->Succs.end();
277 PredSU->CycleBound = std::max(PredSU->CycleBound,
278 I->Dep->Cycle + PredSU->Latency);
// The predecessor is no longer ready; pull it out of the queue unless it
// is only pending (pending SUnits are not in AvailableQueue).
281 if (PredSU->isAvailable) {
282 PredSU->isAvailable = false;
283 if (!PredSU->isPending)
284 AvailableQueue->remove(PredSU);
287 ++PredSU->NumSuccsLeft;
290 /// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
291 /// its predecessor states to reflect the change.
292 void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
293 DOUT << "*** Unscheduling [" << SU->Cycle << "]: ";
294 DEBUG(SU->dump(&DAG));
296 AvailableQueue->UnscheduledNode(SU);
// Undo the predecessor releases done by ScheduleNodeBottomUp, and drop
// any live-physreg entries this node's scheduling created.
298 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
300 CapturePred(I->Dep, SU, I->isCtrl);
301 if (I->Cost < 0 && SU->Cycle == LiveRegCycles[I->Reg]) {
302 LiveRegs.erase(I->Reg);
303 assert(LiveRegDefs[I->Reg] == I->Dep &&
304 "Physical register dependency violated?");
305 LiveRegDefs[I->Reg] = NULL;
306 LiveRegCycles[I->Reg] = 0;
// Re-establish liveness for physreg defs whose uses are still scheduled.
310 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
313 if (LiveRegs.insert(I->Reg)) {
314 assert(!LiveRegDefs[I->Reg] &&
315 "Physical register dependency violated?");
316 LiveRegDefs[I->Reg] = SU;
318 if (I->Dep->Cycle < LiveRegCycles[I->Reg])
319 LiveRegCycles[I->Reg] = I->Dep->Cycle;
// The node becomes schedulable again.
324 SU->isScheduled = false;
325 SU->isAvailable = true;
326 AvailableQueue->push(SU);
329 // FIXME: This is probably too slow!
// Recursive DFS over predecessor edges: sets Reached if TargetSU is
// reachable from SU. Visited prevents revisiting nodes.
330 static void isReachable(SUnit *SU, SUnit *TargetSU,
331 SmallPtrSet<SUnit*, 32> &Visited, bool &Reached) {
333 if (SU == TargetSU) {
337 if (!Visited.insert(SU)) return;
339 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end(); I != E;
341 isReachable(I->Dep, TargetSU, Visited, Reached);
// Convenience wrapper: returns whether TargetSU is reachable from SU.
344 static bool isReachable(SUnit *SU, SUnit *TargetSU) {
345 SmallPtrSet<SUnit*, 32> Visited;
346 bool Reached = false;
347 isReachable(SU, TargetSU, Visited, Reached);
351 /// willCreateCycle - Returns true if adding an edge from SU to TargetSU will
353 static bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
354 if (isReachable(TargetSU, SU))
// Also check physreg (negative-cost) predecessors of SU for reachability.
356 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
358 if (I->Cost < 0 && isReachable(TargetSU, I->Dep))
363 /// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
364 /// BTCycle in order to schedule a specific node. Returns the last unscheduled
365 /// SUnit. Also returns if a successor is unscheduled in the process.
366 void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
367 unsigned &CurCycle) {
// Pop scheduled nodes off the Sequence and unschedule them until we are
// back at BtCycle.
369 while (CurCycle > BtCycle) {
370 OldSU = Sequence.back();
372 if (SU->isSucc(OldSU))
373 // Don't try to remove SU from AvailableQueue.
374 SU->isAvailable = false;
375 UnscheduleNodeBottomUp(OldSU);
// After backtracking, SU must not still have OldSU as a successor.
380 if (SU->isSucc(OldSU)) {
381 assert(false && "Something is wrong!");
388 /// isSafeToCopy - True if the SUnit for the given SDNode can safely cloned,
389 /// i.e. the node does not produce a flag, it does not read a flag and it does
390 /// not have an incoming chain.
391 static bool isSafeToCopy(SDNode *N) {
// Reject nodes that produce a Flag result.
395 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i)
396 if (N->getValueType(i) == MVT::Flag)
// Reject nodes with a chain (Other) or flag operand.
398 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
399 const SDOperand &Op = N->getOperand(i);
400 MVT::ValueType VT = Op.Val->getValueType(Op.ResNo);
401 if (VT == MVT::Other || VT == MVT::Flag)
408 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
409 /// successors to the newly created node.
410 SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
411 DOUT << "Duplicating SU # " << SU->NodeNum << "\n";
413 SUnit *NewSU = Clone(SU);
415 // New SUnit has the exact same predecessors.
416 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
419 NewSU->addPred(I->Dep, I->isCtrl, false, I->Reg, I->Cost);
420 NewSU->Depth = std::max(NewSU->Depth, I->Dep->Depth+1);
423 // Only copy scheduled successors. Cut them from old node's successor
424 // list and move them over.
// DelDeps defers the removePred calls so we don't mutate SU->Succs while
// iterating over it.
425 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
426 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
430 if (I->Dep->isScheduled) {
431 NewSU->Height = std::max(NewSU->Height, I->Dep->Height+1);
432 I->Dep->addPred(NewSU, I->isCtrl, false, I->Reg, I->Cost);
433 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
436 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
437 SUnit *Succ = DelDeps[i].first;
438 bool isCtrl = DelDeps[i].second;
439 Succ->removePred(SU, isCtrl, false);
// Tell the priority queue about the changed and new nodes.
442 AvailableQueue->updateNode(SU);
443 AvailableQueue->addNode(NewSU);
449 /// InsertCCCopiesAndMoveSuccs - Insert expensive cross register class copies
450 /// and move all scheduled successors of the given SUnit to the last copy.
451 void ScheduleDAGRRList::InsertCCCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
452 const TargetRegisterClass *DestRC,
453 const TargetRegisterClass *SrcRC,
454 SmallVector<SUnit*, 2> &Copies) {
// CopyFromSU copies the value out of Reg (SrcRC -> DestRC); NewSUnit(NULL)
// creates a node-less SUnit whose copy reg-classes drive emission.
455 SUnit *CopyFromSU = NewSUnit(NULL);
456 CopyFromSU->CopySrcRC = SrcRC;
457 CopyFromSU->CopyDstRC = DestRC;
458 CopyFromSU->Depth = SU->Depth;
459 CopyFromSU->Height = SU->Height;
// CopyToSU copies it back (DestRC -> SrcRC).
461 SUnit *CopyToSU = NewSUnit(NULL);
462 CopyToSU->CopySrcRC = DestRC;
463 CopyToSU->CopyDstRC = SrcRC;
465 // Only copy scheduled successors. Cut them from old node's successor
466 // list and move them over.
467 SmallVector<std::pair<SUnit*, bool>, 4> DelDeps;
468 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
472 if (I->Dep->isScheduled) {
473 CopyToSU->Height = std::max(CopyToSU->Height, I->Dep->Height+1);
474 I->Dep->addPred(CopyToSU, I->isCtrl, false, I->Reg, I->Cost);
475 DelDeps.push_back(std::make_pair(I->Dep, I->isCtrl));
// Deferred removal, same pattern as CopyAndMoveSuccessors.
478 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i) {
479 SUnit *Succ = DelDeps[i].first;
480 bool isCtrl = DelDeps[i].second;
481 Succ->removePred(SU, isCtrl, false);
// Chain SU -> CopyFromSU -> CopyToSU through the physical register.
484 CopyFromSU->addPred(SU, false, false, Reg, -1);
485 CopyToSU->addPred(CopyFromSU, false, false, Reg, 1);
487 AvailableQueue->updateNode(SU);
488 AvailableQueue->addNode(CopyFromSU);
489 AvailableQueue->addNode(CopyToSU);
490 Copies.push_back(CopyFromSU);
491 Copies.push_back(CopyToSU);
496 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
497 /// definition of the specified node.
498 /// FIXME: Move to SelectionDAG?
499 static MVT::ValueType getPhysicalRegisterVT(SDNode *N, unsigned Reg,
500 const TargetInstrInfo *TII) {
501 const TargetInstrDescriptor &TID = TII->get(N->getTargetOpcode());
502 assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
// NumRes indexes past the explicit results into the implicit-def values.
503 unsigned NumRes = TID.numDefs;
504 for (const unsigned *ImpDef = TID.ImplicitDefs; *ImpDef; ++ImpDef) {
509 return N->getValueType(NumRes);
512 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
513 /// scheduling of the given node to satisfy live physical register dependencies.
514 /// If the specific node is the last one that's available to schedule, do
515 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
516 bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
517 SmallVector<unsigned, 4> &LRegs){
// Fast path: nothing live means nothing to clobber.
518 if (LiveRegs.empty())
// RegAdded de-duplicates registers appended to LRegs.
521 SmallSet<unsigned, 4> RegAdded;
522 // If this node would clobber any "live" register, then it's not ready.
523 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
526 unsigned Reg = I->Reg;
// A live reg whose recorded def is NOT this edge's pred would be clobbered.
527 if (LiveRegs.count(Reg) && LiveRegDefs[Reg] != I->Dep) {
528 if (RegAdded.insert(Reg))
529 LRegs.push_back(Reg);
// Aliasing registers conflict too.
531 for (const unsigned *Alias = MRI->getAliasSet(Reg);
533 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != I->Dep) {
534 if (RegAdded.insert(*Alias))
535 LRegs.push_back(*Alias);
// Check implicit defs of the node itself and all its flagged nodes.
540 for (unsigned i = 0, e = SU->FlaggedNodes.size()+1; i != e; ++i) {
541 SDNode *Node = (i == 0) ? SU->Node : SU->FlaggedNodes[i-1];
542 if (!Node || !Node->isTargetOpcode())
544 const TargetInstrDescriptor &TID = TII->get(Node->getTargetOpcode());
545 if (!TID.ImplicitDefs)
547 for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg) {
548 if (LiveRegs.count(*Reg) && LiveRegDefs[*Reg] != SU) {
549 if (RegAdded.insert(*Reg))
550 LRegs.push_back(*Reg);
552 for (const unsigned *Alias = MRI->getAliasSet(*Reg);
554 if (LiveRegs.count(*Alias) && LiveRegDefs[*Alias] != SU) {
555 if (RegAdded.insert(*Alias))
556 LRegs.push_back(*Alias);
// Non-empty LRegs means scheduling SU now must be delayed.
560 return !LRegs.empty();
564 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
566 void ScheduleDAGRRList::ListScheduleBottomUp() {
567 unsigned CurCycle = 0;
568 // Add root to Available queue.
569 SUnit *RootSU = SUnitMap[DAG.getRoot().Val].front();
570 RootSU->isAvailable = true;
571 AvailableQueue->push(RootSU);
573 // While Available queue is not empty, grab the node with the highest
574 // priority. If it is not ready put it back. Schedule the node.
575 SmallVector<SUnit*, 4> NotReady;
576 while (!AvailableQueue->empty()) {
577 bool Delayed = false;
// LRegsMap remembers, per delayed SUnit, which live physregs blocked it.
578 DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
579 SUnit *CurSU = AvailableQueue->pop();
581 if (CurSU->CycleBound <= CurCycle) {
582 SmallVector<unsigned, 4> LRegs;
583 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
586 LRegsMap.insert(std::make_pair(CurSU, LRegs));
// Defer this SUnit and try the next candidate from the queue.
589 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
590 NotReady.push_back(CurSU);
591 CurSU = AvailableQueue->pop();
594 // All candidates are delayed due to live physical reg dependencies.
595 // Try backtracking, code duplication, or inserting cross class copies
597 if (Delayed && !CurSU) {
598 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
599 SUnit *TrySU = NotReady[i];
600 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
602 // Try unscheduling up to the point where it's safe to schedule
// LiveCycle = earliest cycle at which one of the blocking regs went live.
604 unsigned LiveCycle = CurCycle;
605 for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
606 unsigned Reg = LRegs[j];
607 unsigned LCycle = LiveRegCycles[Reg];
608 LiveCycle = std::min(LiveCycle, LCycle);
610 SUnit *OldSU = Sequence[LiveCycle];
611 if (!WillCreateCycle(TrySU, OldSU)) {
612 BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
613 // Force the current node to be scheduled before the node that
614 // requires the physical reg dep.
615 if (OldSU->isAvailable) {
616 OldSU->isAvailable = false;
617 AvailableQueue->remove(OldSU);
// Add an artificial (chain, special) edge to order TrySU before OldSU.
619 TrySU->addPred(OldSU, true, true);
620 // If one or more successors has been unscheduled, then the current
621 // node is no longer avaialable. Schedule a successor that's now
622 // available instead.
623 if (!TrySU->isAvailable)
624 CurSU = AvailableQueue->pop();
627 TrySU->isPending = false;
628 NotReady.erase(NotReady.begin()+i);
635 // Can't backtrace. Try duplicating the nodes that produces these
636 // "expensive to copy" values to break the dependency. In case even
637 // that doesn't work, insert cross class copies.
638 SUnit *TrySU = NotReady[0];
639 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
640 assert(LRegs.size() == 1 && "Can't handle this yet!");
641 unsigned Reg = LRegs[0];
642 SUnit *LRDef = LiveRegDefs[Reg];
644 if (isSafeToCopy(LRDef->Node))
645 NewDef = CopyAndMoveSuccessors(LRDef);
647 // Issue expensive cross register class copies.
648 MVT::ValueType VT = getPhysicalRegisterVT(LRDef->Node, Reg, TII);
649 const TargetRegisterClass *RC =
650 MRI->getPhysicalRegisterRegClass(VT, Reg);
651 const TargetRegisterClass *DestRC = MRI->getCrossCopyRegClass(RC);
653 assert(false && "Don't know how to copy this physical register!");
656 SmallVector<SUnit*, 2> Copies;
657 InsertCCCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
658 DOUT << "Adding an edge from SU # " << TrySU->NodeNum
659 << " to SU #" << Copies.front()->NodeNum << "\n";
660 TrySU->addPred(Copies.front(), true, true);
661 NewDef = Copies.back();
664 DOUT << "Adding an edge from SU # " << NewDef->NodeNum
665 << " to SU #" << TrySU->NodeNum << "\n";
// The clone/copy is now the live def for Reg; TrySU must wait for it.
666 LiveRegDefs[Reg] = NewDef;
667 NewDef->addPred(TrySU, true, true);
668 TrySU->isAvailable = false;
673 assert(false && "Unable to resolve live physical register dependencies!");
678 // Add the nodes that aren't ready back onto the available list.
679 for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
680 NotReady[i]->isPending = false;
681 // May no longer be available due to backtracking.
682 if (NotReady[i]->isAvailable)
683 AvailableQueue->push(NotReady[i]);
// A null entry in Sequence represents a stall (noop) cycle.
688 Sequence.push_back(0);
690 ScheduleNodeBottomUp(CurSU, CurCycle);
691 Sequence.push_back(CurSU);
696 // Add entry node last
697 if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
698 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();
699 Sequence.push_back(Entry);
702 // Reverse the order if it is bottom up.
703 std::reverse(Sequence.begin(), Sequence.end());
707 // Verify that all SUnits were scheduled.
708 bool AnyNotSched = false;
709 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
710 if (SUnits[i].NumSuccsLeft != 0) {
712 cerr << "*** List scheduling failed! ***\n";
713 SUnits[i].dump(&DAG);
714 cerr << "has not been scheduled!\n";
718 assert(!AnyNotSched);
722 //===----------------------------------------------------------------------===//
723 // Top-Down Scheduling
724 //===----------------------------------------------------------------------===//
726 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
727 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
728 void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
730 // FIXME: the distance between two nodes is not always == the predecessor's
731 // latency. For example, the reader can very well read the register written
732 // by the predecessor later than the issue cycle. It also depends on the
733 // interrupt model (drain vs. freeze).
734 SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
736 --SuccSU->NumPredsLeft;
// Sanity check: a negative count means a predecessor was released twice.
739 if (SuccSU->NumPredsLeft < 0) {
740 cerr << "*** List scheduling failed! ***\n";
742 cerr << " has been released too many times!\n";
747 if (SuccSU->NumPredsLeft == 0) {
748 SuccSU->isAvailable = true;
749 AvailableQueue->push(SuccSU);
754 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
755 /// count of its successors. If a successor pending count is zero, add it to
756 /// the Available queue.
757 void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
758 DOUT << "*** Scheduling [" << CurCycle << "]: ";
759 DEBUG(SU->dump(&DAG));
760 SU->Cycle = CurCycle;
762 AvailableQueue->ScheduledNode(SU);
764 // Top down: release successors
765 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
767 ReleaseSucc(I->Dep, I->isCtrl, CurCycle);
768 SU->isScheduled = true;
771 /// ListScheduleTopDown - The main loop of list scheduling for top-down
773 void ScheduleDAGRRList::ListScheduleTopDown() {
774 unsigned CurCycle = 0;
775 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val].front();
777 // All leaves to Available queue.
778 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
779 // It is available if it has no predecessors.
780 if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
781 AvailableQueue->push(&SUnits[i]);
782 SUnits[i].isAvailable = true;
786 // Emit the entry node first.
787 ScheduleNodeTopDown(Entry, CurCycle);
788 Sequence.push_back(Entry);
791 // While Available queue is not empty, grab the node with the highest
792 // priority. If it is not ready put it back. Schedule the node.
793 std::vector<SUnit*> NotReady;
794 while (!AvailableQueue->empty()) {
795 SUnit *CurSU = AvailableQueue->pop();
// Skip nodes whose cycle bound hasn't been reached yet; they go back
// onto the queue below.
796 while (CurSU && CurSU->CycleBound > CurCycle) {
797 NotReady.push_back(CurSU);
798 CurSU = AvailableQueue->pop();
801 // Add the nodes that aren't ready back onto the available list.
802 AvailableQueue->push_all(NotReady);
// A null entry in Sequence represents a stall (noop) cycle.
806 Sequence.push_back(0);
808 ScheduleNodeTopDown(CurSU, CurCycle);
809 Sequence.push_back(CurSU);
816 // Verify that all SUnits were scheduled.
817 bool AnyNotSched = false;
818 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
819 if (!SUnits[i].isScheduled) {
821 cerr << "*** List scheduling failed! ***\n";
822 SUnits[i].dump(&DAG);
823 cerr << "has not been scheduled!\n";
827 assert(!AnyNotSched);
833 //===----------------------------------------------------------------------===//
834 // RegReductionPriorityQueue Implementation
835 //===----------------------------------------------------------------------===//
837 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
838 // to reduce register pressure.
842 class RegReductionPriorityQueue;
844 /// Sorting functions for the Available queue.
// Comparator for the bottom-up queue; holds a back-pointer to the queue so
// operator() can consult node priorities.
845 struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
846 RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
847 bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
848 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
850 bool operator()(const SUnit* left, const SUnit* right) const;
// Comparator for the top-down queue; same shape as bu_ls_rr_sort.
853 struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
854 RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
855 td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
856 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
858 bool operator()(const SUnit* left, const SUnit* right) const;
860 } // end anonymous namespace
// Returns true if SU is a CopyFromReg whose last operand is not a Flag —
// i.e. a copy from a live-in rather than one glued to a defining node.
862 static inline bool isCopyFromLiveIn(const SUnit *SU) {
863 SDNode *N = SU->Node;
864 return N && N->getOpcode() == ISD::CopyFromReg &&
865 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
// Generic priority-queue wrapper parameterized by a comparator (SF). The
// BU/TD subclasses below override the virtual hooks to maintain
// Sethi-Ullman numbers.
870 class VISIBILITY_HIDDEN RegReductionPriorityQueue
871 : public SchedulingPriorityQueue {
872 std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;
875 RegReductionPriorityQueue() :
// Default hooks are no-ops; subclasses supply the real behavior.
878 virtual void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
879 std::vector<SUnit> &sunits) {}
881 virtual void addNode(const SUnit *SU) {}
883 virtual void updateNode(const SUnit *SU) {}
885 virtual void releaseState() {}
887 virtual unsigned getNodePriority(const SUnit *SU) const {
891 unsigned size() const { return Queue.size(); }
893 bool empty() const { return Queue.empty(); }
895 void push(SUnit *U) {
898 void push_all(const std::vector<SUnit *> &Nodes) {
899 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
900 Queue.push(Nodes[i]);
904 if (empty()) return NULL;
905 SUnit *V = Queue.top();
910 /// remove - This is a really inefficient way to remove a node from a
911 /// priority queue. We should roll our own heap to make this better or
// O(n log n): pop elements into Temp until SU surfaces, drop it, push back.
913 void remove(SUnit *SU) {
914 std::vector<SUnit*> Temp;
916 assert(!Queue.empty() && "Not in queue!");
917 while (Queue.top() != SU) {
918 Temp.push_back(Queue.top());
920 assert(!Queue.empty() && "Not in queue!");
923 // Remove the node from the PQ.
926 // Add all the other nodes back.
927 for (unsigned i = 0, e = Temp.size(); i != e; ++i)
// Bottom-up register-reduction queue: prioritizes by Sethi-Ullman numbers
// and adds pseudo two-address dependency edges at init time.
933 class VISIBILITY_HIDDEN BURegReductionPriorityQueue
934 : public RegReductionPriorityQueue<SF> {
935 // SUnitMap SDNode to SUnit mapping (n -> n).
936 DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;
938 // SUnits - The SUnits for the current graph.
939 const std::vector<SUnit> *SUnits;
941 // SethiUllmanNumbers - The SethiUllman number for each node.
942 std::vector<unsigned> SethiUllmanNumbers;
944 const TargetInstrInfo *TII;
946 explicit BURegReductionPriorityQueue(const TargetInstrInfo *tii)
949 void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
950 std::vector<SUnit> &sunits) {
953 // Add pseudo dependency edges for two-address nodes.
954 AddPseudoTwoAddrDeps();
955 // Calculate node priorities.
956 CalculateSethiUllmanNumbers();
// A newly added (cloned/copy) node gets its number computed on demand.
959 void addNode(const SUnit *SU) {
960 SethiUllmanNumbers.resize(SUnits->size(), 0);
961 CalcNodeSethiUllmanNumber(SU);
// Reset then recompute when a node's dependencies changed.
964 void updateNode(const SUnit *SU) {
965 SethiUllmanNumbers[SU->NodeNum] = 0;
966 CalcNodeSethiUllmanNumber(SU);
969 void releaseState() {
971 SethiUllmanNumbers.clear();
// Priority overrides for special node kinds; see inline comments.
974 unsigned getNodePriority(const SUnit *SU) const {
975 assert(SU->NodeNum < SethiUllmanNumbers.size());
976 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
977 if (Opc == ISD::CopyFromReg && !isCopyFromLiveIn(SU))
978 // CopyFromReg should be close to its def because it restricts
979 // allocation choices. But if it is a livein then perhaps we want it
980 // closer to its uses so it can be coalesced.
982 else if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
983 // CopyToReg should be close to its uses to facilitate coalescing and
986 else if (SU->NumSuccs == 0)
987 // If SU does not have a use, i.e. it doesn't produce a value that would
988 // be consumed (e.g. store), then it terminates a chain of computation.
989 // Give it a large SethiUllman number so it will be scheduled right
990 // before its predecessors that it doesn't lengthen their live ranges.
992 else if (SU->NumPreds == 0)
993 // If SU does not have a def, schedule it close to its uses because it
994 // does not lengthen any live ranges.
997 return SethiUllmanNumbers[SU->NodeNum];
1001 bool canClobber(SUnit *SU, SUnit *Op);
1002 void AddPseudoTwoAddrDeps();
1003 void CalculateSethiUllmanNumbers();
1004 unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
// Top-down counterpart: Sethi-Ullman priorities only, no two-address
// pseudo edges and no per-opcode priority overrides.
1009 class VISIBILITY_HIDDEN TDRegReductionPriorityQueue
1010 : public RegReductionPriorityQueue<SF> {
1011 // SUnitMap SDNode to SUnit mapping (n -> n).
1012 DenseMap<SDNode*, std::vector<SUnit*> > *SUnitMap;
1014 // SUnits - The SUnits for the current graph.
1015 const std::vector<SUnit> *SUnits;
1017 // SethiUllmanNumbers - The SethiUllman number for each node.
1018 std::vector<unsigned> SethiUllmanNumbers;
1021 TDRegReductionPriorityQueue() {}
1023 void initNodes(DenseMap<SDNode*, std::vector<SUnit*> > &sumap,
1024 std::vector<SUnit> &sunits) {
1027 // Calculate node priorities.
1028 CalculateSethiUllmanNumbers();
// Compute a number for nodes added after initialization.
1031 void addNode(const SUnit *SU) {
1032 SethiUllmanNumbers.resize(SUnits->size(), 0);
1033 CalcNodeSethiUllmanNumber(SU);
1036 void updateNode(const SUnit *SU) {
1037 SethiUllmanNumbers[SU->NodeNum] = 0;
1038 CalcNodeSethiUllmanNumber(SU);
1041 void releaseState() {
1043 SethiUllmanNumbers.clear();
1046 unsigned getNodePriority(const SUnit *SU) const {
1047 assert(SU->NodeNum < SethiUllmanNumbers.size());
1048 return SethiUllmanNumbers[SU->NodeNum];
1052 void CalculateSethiUllmanNumbers();
1053 unsigned CalcNodeSethiUllmanNumber(const SUnit *SU);
1057 /// closestSucc - Returns the scheduled cycle of the successor which is
1058 /// closet to the current cycle.
1059 static unsigned closestSucc(const SUnit *SU) {
1060 unsigned MaxCycle = 0;
1061 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1063 unsigned Cycle = I->Dep->Cycle;
1064 // If there are bunch of CopyToRegs stacked up, they should be considered
1065 // to be at the same position.
// Recurse through CopyToReg successors so a chain of copies counts as one.
1066 if (I->Dep->Node && I->Dep->Node->getOpcode() == ISD::CopyToReg)
1067 Cycle = closestSucc(I->Dep)+1;
1068 if (Cycle > MaxCycle)
1074 /// calcMaxScratches - Returns an cost estimate of the worse case requirement
1075 /// for scratch registers. Live-in operands and live-out results don't count
1076 /// since they are "fixed".
1077 static unsigned calcMaxScratches(const SUnit *SU) {
1078 unsigned Scratches = 0;
// Count data (non-chain) predecessors that aren't CopyFromReg live-ins.
1079 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1081 if (I->isCtrl) continue; // ignore chain preds
1082 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyFromReg)
// Count data (non-chain) successors that aren't CopyToReg live-outs.
1085 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1087 if (I->isCtrl) continue; // ignore chain succs
1088 if (!I->Dep->Node || I->Dep->Node->getOpcode() != ISD::CopyToReg)
1095 bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1096 // There used to be a special tie breaker here that looked for
1097 // two-address instructions and preferred the instruction with a
1098 // def&use operand. The special case triggered diagnostics when
1099 // _GLIBCXX_DEBUG was enabled because it broke the strict weak
1100 // ordering that priority_queue requires. It didn't help much anyway
1101 // because AddPseudoTwoAddrDeps already covers many of the cases
1102 // where it would have applied. In addition, it's counter-intuitive
1103 // that a tie breaker would be the first thing attempted. There's a
1104 // "real" tie breaker below that is the operation of last resort.
1105 // The fact that the "special tie breaker" would trigger when there
1106 // wasn't otherwise a tie is what broke the strict weak ordering
1109 unsigned LPriority = SPQ->getNodePriority(left);
1110 unsigned RPriority = SPQ->getNodePriority(right);
1111 if (LPriority > RPriority)
1113 else if (LPriority == RPriority) {
1114 // Try schedule def + use closer when Sethi-Ullman numbers are the same.
1119 // and the following instructions are both ready.
1123 // Then schedule t2 = op first.
1130 // This creates more short live intervals.
1131 unsigned LDist = closestSucc(left);
1132 unsigned RDist = closestSucc(right);
1135 else if (LDist == RDist) {
1136 // Intuitively, it's good to push down instructions whose results are
1137 // liveout so their long live ranges won't conflict with other values
1138 // which are needed inside the BB. Further prioritize liveout instructions
1139 // by the number of operands which are calculated within the BB.
1140 unsigned LScratch = calcMaxScratches(left);
1141 unsigned RScratch = calcMaxScratches(right);
1142 if (LScratch > RScratch)
1144 else if (LScratch == RScratch)
1145 if (left->Height > right->Height)
1147 else if (left->Height == right->Height)
1148 if (left->Depth < right->Depth)
1150 else if (left->Depth == right->Depth)
1151 if (left->CycleBound > right->CycleBound)
1159 bool BURegReductionPriorityQueue<SF>::canClobber(SUnit *SU, SUnit *Op) {
1160 if (SU->isTwoAddress) {
1161 unsigned Opc = SU->Node->getTargetOpcode();
1162 unsigned NumRes = TII->getNumDefs(Opc);
1163 unsigned NumOps = ScheduleDAG::CountOperands(SU->Node);
1164 for (unsigned i = 0; i != NumOps; ++i) {
1165 if (TII->getOperandConstraint(Opc, i+NumRes, TOI::TIED_TO) != -1) {
1166 SDNode *DU = SU->Node->getOperand(i).Val;
1167 if (Op == (*SUnitMap)[DU][SU->InstanceNo])
1176 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
1177 /// it as a def&use operand. Add a pseudo control edge from it to the other
1178 /// node (if it won't create a cycle) so the two-address one will be scheduled
1179 /// first (lower in the schedule).
1181 void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
1182 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
1183 SUnit *SU = (SUnit *)&((*SUnits)[i]);
1184 if (!SU->isTwoAddress)
1187 SDNode *Node = SU->Node;
1188 if (!Node || !Node->isTargetOpcode())
1191 unsigned Opc = Node->getTargetOpcode();
1192 unsigned NumRes = TII->getNumDefs(Opc);
1193 unsigned NumOps = ScheduleDAG::CountOperands(Node);
1194 for (unsigned j = 0; j != NumOps; ++j) {
1195 if (TII->getOperandConstraint(Opc, j+NumRes, TOI::TIED_TO) != -1) {
1196 SDNode *DU = SU->Node->getOperand(j).Val;
1197 SUnit *DUSU = (*SUnitMap)[DU][SU->InstanceNo];
1198 if (!DUSU) continue;
1199 for (SUnit::succ_iterator I = DUSU->Succs.begin(),E = DUSU->Succs.end();
1201 if (I->isCtrl) continue;
1202 SUnit *SuccSU = I->Dep;
1203 // Don't constraint nodes with implicit defs. It can create cycles
1204 // plus it may increase register pressures.
1205 if (SuccSU == SU || SuccSU->hasImplicitDefs)
1207 // Be conservative. Ignore if nodes aren't at the same depth.
1208 if (SuccSU->Depth != SU->Depth)
1210 if ((!canClobber(SuccSU, DUSU) ||
1211 (!SU->isCommutable && SuccSU->isCommutable)) &&
1212 !isReachable(SuccSU, SU)) {
1213 DOUT << "Adding an edge from SU # " << SU->NodeNum
1214 << " to SU #" << SuccSU->NodeNum << "\n";
1215 SU->addPred(SuccSU, true, true);
1223 /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
1224 /// Smaller number is the higher priority.
1226 unsigned BURegReductionPriorityQueue<SF>::
1227 CalcNodeSethiUllmanNumber(const SUnit *SU) {
1228 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
1229 if (SethiUllmanNumber != 0)
1230 return SethiUllmanNumber;
1233 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1235 if (I->isCtrl) continue; // ignore chain preds
1236 SUnit *PredSU = I->Dep;
1237 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
1238 if (PredSethiUllman > SethiUllmanNumber) {
1239 SethiUllmanNumber = PredSethiUllman;
1241 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1245 SethiUllmanNumber += Extra;
1247 if (SethiUllmanNumber == 0)
1248 SethiUllmanNumber = 1;
1250 return SethiUllmanNumber;
1253 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1254 /// scheduling units.
1256 void BURegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
1257 SethiUllmanNumbers.assign(SUnits->size(), 0);
1259 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1260 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
1263 static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) {
1265 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1267 SUnit *SuccSU = I->Dep;
1268 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
1269 EE = SuccSU->Preds.end(); II != EE; ++II) {
1270 SUnit *PredSU = II->Dep;
1271 if (!PredSU->isScheduled)
1281 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
1282 unsigned LPriority = SPQ->getNodePriority(left);
1283 unsigned RPriority = SPQ->getNodePriority(right);
1284 bool LIsTarget = left->Node && left->Node->isTargetOpcode();
1285 bool RIsTarget = right->Node && right->Node->isTargetOpcode();
1286 bool LIsFloater = LIsTarget && left->NumPreds == 0;
1287 bool RIsFloater = RIsTarget && right->NumPreds == 0;
1288 unsigned LBonus = (SumOfUnscheduledPredsOfSuccs(left) == 1) ? 2 : 0;
1289 unsigned RBonus = (SumOfUnscheduledPredsOfSuccs(right) == 1) ? 2 : 0;
1291 if (left->NumSuccs == 0 && right->NumSuccs != 0)
1293 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
1296 // Special tie breaker: if two nodes share a operand, the one that use it
1297 // as a def&use operand is preferred.
1298 if (LIsTarget && RIsTarget) {
1299 if (left->isTwoAddress && !right->isTwoAddress) {
1300 SDNode *DUNode = left->Node->getOperand(0).Val;
1301 if (DUNode->isOperand(right->Node))
1304 if (!left->isTwoAddress && right->isTwoAddress) {
1305 SDNode *DUNode = right->Node->getOperand(0).Val;
1306 if (DUNode->isOperand(left->Node))
1314 if (left->NumSuccs == 1)
1316 if (right->NumSuccs == 1)
1319 if (LPriority+LBonus < RPriority+RBonus)
1321 else if (LPriority == RPriority)
1322 if (left->Depth < right->Depth)
1324 else if (left->Depth == right->Depth)
1325 if (left->NumSuccsLeft > right->NumSuccsLeft)
1327 else if (left->NumSuccsLeft == right->NumSuccsLeft)
1328 if (left->CycleBound > right->CycleBound)
1333 /// CalcNodeSethiUllmanNumber - Priority is the Sethi Ullman number.
1334 /// Smaller number is the higher priority.
1336 unsigned TDRegReductionPriorityQueue<SF>::
1337 CalcNodeSethiUllmanNumber(const SUnit *SU) {
1338 unsigned &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
1339 if (SethiUllmanNumber != 0)
1340 return SethiUllmanNumber;
1342 unsigned Opc = SU->Node ? SU->Node->getOpcode() : 0;
1343 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1344 SethiUllmanNumber = 0xffff;
1345 else if (SU->NumSuccsLeft == 0)
1346 // If SU does not have a use, i.e. it doesn't produce a value that would
1347 // be consumed (e.g. store), then it terminates a chain of computation.
1348 // Give it a small SethiUllman number so it will be scheduled right before
1349 // its predecessors that it doesn't lengthen their live ranges.
1350 SethiUllmanNumber = 0;
1351 else if (SU->NumPredsLeft == 0 &&
1352 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
1353 SethiUllmanNumber = 0xffff;
1356 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1358 if (I->isCtrl) continue; // ignore chain preds
1359 SUnit *PredSU = I->Dep;
1360 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU);
1361 if (PredSethiUllman > SethiUllmanNumber) {
1362 SethiUllmanNumber = PredSethiUllman;
1364 } else if (PredSethiUllman == SethiUllmanNumber && !I->isCtrl)
1368 SethiUllmanNumber += Extra;
1371 return SethiUllmanNumber;
1374 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1375 /// scheduling units.
1377 void TDRegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
1378 SethiUllmanNumbers.assign(SUnits->size(), 0);
1380 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1381 CalcNodeSethiUllmanNumber(&(*SUnits)[i]);
1384 //===----------------------------------------------------------------------===//
1385 // Public Constructor Functions
1386 //===----------------------------------------------------------------------===//
1388 llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
1390 MachineBasicBlock *BB) {
1391 const TargetInstrInfo *TII = DAG->getTarget().getInstrInfo();
1392 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), true,
1393 new BURegReductionPriorityQueue<bu_ls_rr_sort>(TII));
1396 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
1398 MachineBasicBlock *BB) {
1399 return new ScheduleDAGRRList(*DAG, BB, DAG->getTarget(), false,
1400 new TDRegReductionPriorityQueue<td_ls_rr_sort>());