1 //===----- ScheduleDAGList.cpp - Reg pressure reduction list scheduler ----===//
3 // The LLVM Compiler Infrastructure
5 // This file was developed by Evan Cheng and is distributed under the
6 // University of Illinois Open Source License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "sched"
19 #include "llvm/CodeGen/ScheduleDAG.h"
20 #include "llvm/CodeGen/SSARegMap.h"
21 #include "llvm/Target/MRegisterInfo.h"
22 #include "llvm/Target/TargetData.h"
23 #include "llvm/Target/TargetMachine.h"
24 #include "llvm/Target/TargetInstrInfo.h"
25 #include "llvm/Support/Debug.h"
26 #include "llvm/ADT/Statistic.h"
30 #include "llvm/Support/CommandLine.h"
34 //===----------------------------------------------------------------------===//
35 /// ScheduleDAGRRList - The actual register reduction list scheduler
36 /// implementation. This supports both top-down and bottom-up scheduling.
39 class ScheduleDAGRRList : public ScheduleDAG {
41 /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
45 /// AvailableQueue - The priority queue to use for the available SUnits.
// NOTE(review): the queue is owned by this class -- it is deleted in the
// destructor below, so callers must pass a heap-allocated queue.
47 SchedulingPriorityQueue *AvailableQueue;
50 ScheduleDAGRRList(SelectionDAG &dag, MachineBasicBlock *bb,
51 const TargetMachine &tm, bool isbottomup,
52 SchedulingPriorityQueue *availqueue)
53 : ScheduleDAG(dag, bb, tm), isBottomUp(isbottomup),
54 AvailableQueue(availqueue) {
// The destructor releases the priority queue passed at construction time.
57 ~ScheduleDAGRRList() {
58 delete AvailableQueue;
// Internal helpers: Release{Pred,Succ} decrement dependents' pending counts,
// ScheduleNode* commit one node, and ListSchedule* drive the main loops.
64 void ReleasePred(SUnit *PredSU, bool isChain, unsigned CurCycle);
65 void ReleaseSucc(SUnit *SuccSU, bool isChain, unsigned CurCycle);
66 void ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle);
67 void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
68 void ListScheduleTopDown();
69 void ListScheduleBottomUp();
70 void CommuteNodesToReducePressure();
72 } // end anonymous namespace
75 /// Schedule - Schedule the DAG using list scheduling.
76 void ScheduleDAGRRList::Schedule() {
77 DEBUG(std::cerr << "********** List Scheduling **********\n");
79 // Build scheduling units.
84 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
85 SUnits[su].dumpAll(&DAG));
// Let the priority queue see the whole graph before scheduling begins.
87 AvailableQueue->initNodes(SUnits);
89 // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
// NOTE(review): the direction is presumably selected by the isBottomUp flag
// set at construction -- confirm the guard around these two calls.
91 ListScheduleBottomUp();
93 ListScheduleTopDown();
// Allow the queue to drop its per-graph state now that scheduling is done.
95 AvailableQueue->releaseState();
// Record commutable two-address nodes whose commuting may reduce register
// pressure; the actual commuting happens when MIs are emitted.
97 CommuteNodesToReducePressure();
99 DEBUG(std::cerr << "*** Final schedule ***\n");
100 DEBUG(dumpSchedule());
101 DEBUG(std::cerr << "\n");
103 // Emit in scheduled order
107 /// CommuteNodesToReducePressure - If a node is two-address and commutable, and
108 /// it is not the last use of its first operand, add it to the CommuteSet if
109 /// possible. It will be commuted when it is translated to a MI.
110 void ScheduleDAGRRList::CommuteNodesToReducePressure() {
// Walk the schedule back to front; OperandSeen holds units that have a use
// later in the schedule than the node currently being examined.
111 std::set<SUnit *> OperandSeen;
112 for (unsigned i = Sequence.size()-1; i != 0; --i) { // Ignore first node.
113 SUnit *SU = Sequence[i];
115 if (SU->isTwoAddress && SU->isCommutable) {
// Operand 0 is the def&use operand of a two-address node.
116 SDNode *OpN = SU->Node->getOperand(0).Val;
117 SUnit *OpSU = SUnitMap[OpN];
118 if (OpSU && OperandSeen.count(OpSU) == 1) {
119 // Ok, so SU is not the last use of OpSU, but SU is two-address so
120 // it will clobber OpSU. Try to commute it if possible.
121 bool DoCommute = true;
// Check the other operands: if a candidate replacement operand also has a
// later use, commuting would not help.
122 for (unsigned j = 1, e = SU->Node->getNumOperands(); j != e; ++j) {
123 OpN = SU->Node->getOperand(j).Val;
124 OpSU = SUnitMap[OpN];
125 if (OpSU && OperandSeen.count(OpSU) == 1) {
131 CommuteSet.insert(SU->Node);
// Mark this node's predecessors as having a use later than any node
// examined in subsequent (earlier-in-schedule) iterations.
135 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
136 E = SU->Preds.end(); I != E; ++I) {
138 OperandSeen.insert(I->first);
143 //===----------------------------------------------------------------------===//
144 // Bottom-Up Scheduling
145 //===----------------------------------------------------------------------===//
/// getRegClass - Determine the register class produced by SU: for target
/// instructions it comes from the instruction descriptor; for CopyFromReg it
/// is looked up from the source register (virtual registers via RegMap,
/// physical registers by scanning all register classes).
147 static const TargetRegisterClass *getRegClass(SUnit *SU,
148 const TargetInstrInfo *TII,
149 const MRegisterInfo *MRI,
151 if (SU->Node->isTargetOpcode()) {
152 unsigned Opc = SU->Node->getTargetOpcode();
153 const TargetInstrDescriptor &II = TII->get(Opc);
// Register class of the first result operand, per the target descriptor.
154 return II.OpInfo->RegClass;
// Besides target opcodes only reg copies are expected here.
156 assert(SU->Node->getOpcode() == ISD::CopyFromReg);
157 unsigned SrcReg = cast<RegisterSDNode>(SU->Node->getOperand(1))->getReg();
158 if (MRegisterInfo::isVirtualRegister(SrcReg))
159 return RegMap->getRegClass(SrcReg);
// Physical register: find a class that both contains the register and can
// hold the value type produced by this node.
161 for (MRegisterInfo::regclass_iterator I = MRI->regclass_begin(),
162 E = MRI->regclass_end(); I != E; ++I)
163 if ((*I)->hasType(SU->Node->getValueType(0)) &&
164 (*I)->contains(SrcReg))
166 assert(false && "Couldn't find register class for reg copy!");
/// getNumResults - Count the value results of SU's node, ignoring chain
/// (MVT::Other) and flag (MVT::Flag) results.
172 static unsigned getNumResults(SUnit *SU) {
173 unsigned NumResults = 0;
174 for (unsigned i = 0, e = SU->Node->getNumValues(); i != e; ++i) {
175 MVT::ValueType VT = SU->Node->getValueType(i);
176 if (VT != MVT::Other && VT != MVT::Flag)
182 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
183 /// the Available queue if the count reaches zero. Also update its cycle bound.
184 void ScheduleDAGRRList::ReleasePred(SUnit *PredSU, bool isChain,
186 // FIXME: the distance between two nodes is not always == the predecessor's
187 // latency. For example, the reader can very well read the register written
188 // by the predecessor later than the issue cycle. It also depends on the
189 // interrupt model (drain vs. freeze).
190 PredSU->CycleBound = std::max(PredSU->CycleBound, CurCycle + PredSU->Latency);
// Decrement the appropriate pending-successor count (data vs. chain edge).
193 PredSU->NumSuccsLeft--;
195 PredSU->NumChainSuccsLeft--;
// Sanity check: a negative count means an edge was released twice.
198 if (PredSU->NumSuccsLeft < 0 || PredSU->NumChainSuccsLeft < 0) {
199 std::cerr << "*** List scheduling failed! ***\n";
201 std::cerr << " has been released too many times!\n";
// All successors scheduled: the predecessor becomes available, except for
// EntryToken which must always be emitted last.
206 if ((PredSU->NumSuccsLeft + PredSU->NumChainSuccsLeft) == 0) {
207 // EntryToken has to go last! Special case it here.
208 if (PredSU->Node->getOpcode() != ISD::EntryToken) {
209 PredSU->isAvailable = true;
210 AvailableQueue->push(PredSU);
215 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
216 /// count of its predecessors. If a predecessor pending count is zero, add it to
217 /// the Available queue.
218 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
219 DEBUG(std::cerr << "*** Scheduling [" << CurCycle << "]: ");
220 DEBUG(SU->dump(&DAG));
221 SU->Cycle = CurCycle;
// Notify the priority queue so it can update its state, then record the
// node; Sequence is reversed after bottom-up scheduling completes.
223 AvailableQueue->ScheduledNode(SU);
224 Sequence.push_back(SU);
226 // Bottom up: release predecessors
227 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
228 E = SU->Preds.end(); I != E; ++I)
229 ReleasePred(I->first, I->second, CurCycle);
230 SU->isScheduled = true;
233 /// isReady - True if node's lower cycle bound is less or equal to the current
234 /// scheduling cycle. Always true if all nodes have uniform latency 1.
235 static inline bool isReady(SUnit *SU, unsigned CurCycle) {
236 return SU->CycleBound <= CurCycle;
239 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
241 void ScheduleDAGRRList::ListScheduleBottomUp() {
242 unsigned CurCycle = 0;
243 // Add root to Available queue.
244 AvailableQueue->push(SUnitMap[DAG.getRoot().Val]);
246 // While Available queue is not empty, grab the node with the highest
247 // priority. If it is not ready put it back. Schedule the node.
248 std::vector<SUnit*> NotReady;
// NOTE(review): this outer CurNode is shadowed by the declaration inside the
// loop below and appears to be dead -- confirm and remove if so.
249 SUnit *CurNode = NULL;
250 while (!AvailableQueue->empty()) {
251 SUnit *CurNode = AvailableQueue->pop();
// Pop until a node whose CycleBound permits issue this cycle is found,
// stashing the not-yet-ready ones in NotReady.
252 while (CurNode && !isReady(CurNode, CurCycle)) {
253 NotReady.push_back(CurNode);
254 CurNode = AvailableQueue->pop();
257 // Add the nodes that aren't ready back onto the available list.
258 AvailableQueue->push_all(NotReady);
262 ScheduleNodeBottomUp(CurNode, CurCycle);
266 // Add entry node last
267 if (DAG.getEntryNode().Val != DAG.getRoot().Val) {
268 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];
269 Sequence.push_back(Entry);
272 // Reverse the order if it is bottom up.
273 std::reverse(Sequence.begin(), Sequence.end());
277 // Verify that all SUnits were scheduled.
278 bool AnyNotSched = false;
279 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// A nonzero remaining successor count means the unit was never released.
280 if (SUnits[i].NumSuccsLeft != 0 || SUnits[i].NumChainSuccsLeft != 0) {
282 std::cerr << "*** List scheduling failed! ***\n";
283 SUnits[i].dump(&DAG);
284 std::cerr << "has not been scheduled!\n";
288 assert(!AnyNotSched);
292 //===----------------------------------------------------------------------===//
293 // Top-Down Scheduling
294 //===----------------------------------------------------------------------===//
296 /// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
297 /// the PendingQueue if the count reaches zero.
298 void ScheduleDAGRRList::ReleaseSucc(SUnit *SuccSU, bool isChain,
300 // FIXME: the distance between two nodes is not always == the predecessor's
301 // latency. For example, the reader can very well read the register written
302 // by the predecessor later than the issue cycle. It also depends on the
303 // interrupt model (drain vs. freeze).
304 SuccSU->CycleBound = std::max(SuccSU->CycleBound, CurCycle + SuccSU->Latency);
// Decrement the appropriate pending-predecessor count (data vs. chain edge).
307 SuccSU->NumPredsLeft--;
309 SuccSU->NumChainPredsLeft--;
// Sanity check: a negative count means an edge was released twice.
312 if (SuccSU->NumPredsLeft < 0 || SuccSU->NumChainPredsLeft < 0) {
313 std::cerr << "*** List scheduling failed! ***\n";
315 std::cerr << " has been released too many times!\n";
// All predecessors scheduled: the successor becomes available.
320 if ((SuccSU->NumPredsLeft + SuccSU->NumChainPredsLeft) == 0) {
321 SuccSU->isAvailable = true;
322 AvailableQueue->push(SuccSU);
327 /// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
328 /// count of its successors. If a successor pending count is zero, add it to
329 /// the Available queue.
330 void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
331 DEBUG(std::cerr << "*** Scheduling [" << CurCycle << "]: ");
332 DEBUG(SU->dump(&DAG));
333 SU->Cycle = CurCycle;
// Notify the priority queue so it can update its state, then record the
// node in final (top-down) order.
335 AvailableQueue->ScheduledNode(SU);
336 Sequence.push_back(SU);
338 // Top down: release successors
339 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Succs.begin(),
340 E = SU->Succs.end(); I != E; ++I)
341 ReleaseSucc(I->first, I->second, CurCycle);
342 SU->isScheduled = true;
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers: seed the queue with predecessor-free units, then repeatedly
/// schedule the highest-priority ready node.
345 void ScheduleDAGRRList::ListScheduleTopDown() {
346 unsigned CurCycle = 0;
347 SUnit *Entry = SUnitMap[DAG.getEntryNode().Val];
349 // All leaves to Available queue.
350 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
351 // It is available if it has no predecessors.
352 if (SUnits[i].Preds.size() == 0 && &SUnits[i] != Entry) {
353 AvailableQueue->push(&SUnits[i]);
354 SUnits[i].isAvailable = true;
358 // Emit the entry node first.
359 ScheduleNodeTopDown(Entry, CurCycle);
362 // While Available queue is not empty, grab the node with the highest
363 // priority. If it is not ready put it back. Schedule the node.
364 std::vector<SUnit*> NotReady;
// NOTE(review): this outer CurNode is shadowed by the declaration inside the
// loop below and appears to be dead -- confirm and remove if so.
365 SUnit *CurNode = NULL;
366 while (!AvailableQueue->empty()) {
367 SUnit *CurNode = AvailableQueue->pop();
// Pop until a ready node is found, stashing not-yet-ready ones.
368 while (CurNode && !isReady(CurNode, CurCycle)) {
369 NotReady.push_back(CurNode);
370 CurNode = AvailableQueue->pop();
373 // Add the nodes that aren't ready back onto the available list.
374 AvailableQueue->push_all(NotReady);
378 ScheduleNodeTopDown(CurNode, CurCycle);
384 // Verify that all SUnits were scheduled.
385 bool AnyNotSched = false;
386 for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
387 if (!SUnits[i].isScheduled) {
389 std::cerr << "*** List scheduling failed! ***\n";
390 SUnits[i].dump(&DAG);
391 std::cerr << "has not been scheduled!\n";
395 assert(!AnyNotSched);
401 //===----------------------------------------------------------------------===//
402 // RegReductionPriorityQueue Implementation
403 //===----------------------------------------------------------------------===//
405 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
406 // to reduce register pressure.
// NOTE(review): presumably a forward declaration of a class template
// (template<class SF> class RegReductionPriorityQueue) since it is used
// below with explicit template arguments -- confirm the template header.
410 class RegReductionPriorityQueue;
412 /// Sorting functions for the Available queue.
// Each comparator keeps a back-pointer to its owning queue so it can look
// up Sethi-Ullman numbers while ordering the heap.
413 struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
414 RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
415 bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
416 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
418 bool operator()(const SUnit* left, const SUnit* right) const;
// Top-down flavor of the comparator; same structure as bu_ls_rr_sort.
421 struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
422 RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
423 td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
424 td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
426 bool operator()(const SUnit* left, const SUnit* right) const;
428 } // end anonymous namespace
/// RegReductionPriorityQueue - Common priority-queue base shared by the
/// bottom-up and top-down schedulers; SF is the comparator ordering the
/// underlying heap.
432 class RegReductionPriorityQueue : public SchedulingPriorityQueue {
433 std::priority_queue<SUnit*, std::vector<SUnit*>, SF> Queue;
436 RegReductionPriorityQueue() :
// Default hooks do nothing; subclasses override them to compute and
// release per-graph priority state.
439 virtual void initNodes(const std::vector<SUnit> &sunits) {}
440 virtual void releaseState() {}
442 virtual int getSethiUllmanNumber(unsigned NodeNum) const {
446 bool empty() const { return Queue.empty(); }
448 void push(SUnit *U) {
451 void push_all(const std::vector<SUnit *> &Nodes) {
452 for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
453 Queue.push(Nodes[i]);
// pop returns NULL on an empty queue rather than asserting.
457 if (empty()) return NULL;
458 SUnit *V = Queue.top();
/// BURegReductionPriorityQueue - Bottom-up queue: adds pseudo two-address
/// dependency edges and orders nodes by Sethi-Ullman numbers to reduce
/// register pressure.
465 class BURegReductionPriorityQueue : public RegReductionPriorityQueue<SF> {
466 // SUnits - The SUnits for the current graph.
467 const std::vector<SUnit> *SUnits;
469 // SethiUllmanNumbers - The SethiUllman number for each node.
470 std::vector<int> SethiUllmanNumbers;
473 BURegReductionPriorityQueue() {}
475 void initNodes(const std::vector<SUnit> &sunits) {
477 // Add pseudo dependency edges for two-address nodes.
478 AddPseudoTwoAddrDeps();
479 // Calculate node priorities.
480 CalculatePriorities();
// Drop the per-graph priority table between scheduling regions.
483 void releaseState() {
485 SethiUllmanNumbers.clear();
488 int getSethiUllmanNumber(unsigned NodeNum) const {
489 assert(NodeNum < SethiUllmanNumbers.size());
490 return SethiUllmanNumbers[NodeNum];
494 void AddPseudoTwoAddrDeps();
495 void CalculatePriorities();
496 int CalcNodePriority(const SUnit *SU);
/// TDRegReductionPriorityQueue - Top-down queue; orders nodes by
/// Sethi-Ullman numbers (no pseudo two-address edges are added here).
501 class TDRegReductionPriorityQueue : public RegReductionPriorityQueue<SF> {
502 // SUnits - The SUnits for the current graph.
503 const std::vector<SUnit> *SUnits;
505 // SethiUllmanNumbers - The SethiUllman number for each node.
506 std::vector<int> SethiUllmanNumbers;
509 TDRegReductionPriorityQueue() {}
511 void initNodes(const std::vector<SUnit> &sunits) {
513 // Calculate node priorities.
514 CalculatePriorities();
// Drop the per-graph priority table between scheduling regions.
517 void releaseState() {
519 SethiUllmanNumbers.clear();
522 int getSethiUllmanNumber(unsigned NodeNum) const {
523 assert(NodeNum < SethiUllmanNumbers.size());
524 return SethiUllmanNumbers[NodeNum];
528 void CalculatePriorities();
529 int CalcNodePriority(const SUnit *SU);
/// isFloater - Return true if SU is a target node that can "float": it has
/// no data predecessors, or its single predecessor's only non-chain edges
/// come from trivial nodes (EntryToken/TokenFactor/CopyFromReg/CopyToReg).
/// Scheduling such nodes early tends to maintain or reduce register
/// pressure.
533 static bool isFloater(const SUnit *SU) {
534 if (SU->Node->isTargetOpcode()) {
535 if (SU->NumPreds == 0)
537 if (SU->NumPreds == 1) {
538 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
539 E = SU->Preds.end(); I != E; ++I) {
// Skip chain edges; only data predecessors matter here.
540 if (I->second) continue;
542 SUnit *PredSU = I->first;
543 unsigned Opc = PredSU->Node->getOpcode();
// A "real" predecessor opcode disqualifies the node from floating.
544 if (Opc != ISD::EntryToken && Opc != ISD::TokenFactor &&
545 Opc != ISD::CopyFromReg && Opc != ISD::CopyToReg)
/// isSimpleFloaterUse - Return true if every non-chain predecessor of SU is
/// itself a floater (see isFloater above).
554 static bool isSimpleFloaterUse(const SUnit *SU) {
556 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
557 E = SU->Preds.end(); I != E; ++I) {
// Skip chain edges; only data predecessors matter.
558 if (I->second) continue;
561 if (!isFloater(I->first))
/// bu_ls_rr_sort::operator() - Bottom-up priority predicate for the heap:
/// returns true when 'right' should be scheduled before 'left'.
568 bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
569 unsigned LeftNum = left->NodeNum;
570 unsigned RightNum = right->NodeNum;
571 bool LIsTarget = left->Node->isTargetOpcode();
572 bool RIsTarget = right->Node->isTargetOpcode();
573 int LPriority = SPQ->getSethiUllmanNumber(LeftNum);
574 int RPriority = SPQ->getSethiUllmanNumber(RightNum);
// NOTE(review): LBonus/RBonus are used in the final comparison below; their
// initialization is expected right after the priorities -- confirm.
578 // Schedule floaters (e.g. load from some constant address) and those nodes
579 // with a single predecessor each first. They maintain / reduce register
581 if (isFloater(left) || isSimpleFloaterUse(left))
583 if (isFloater(right) || isSimpleFloaterUse(right))
586 // Special tie breaker: if two nodes share an operand, the one that uses it
587 // as a def&use operand is preferred.
588 if (LIsTarget && RIsTarget) {
589 if (left->isTwoAddress && !right->isTwoAddress) {
590 SDNode *DUNode = left->Node->getOperand(0).Val;
591 if (DUNode->isOperand(right->Node))
594 if (!left->isTwoAddress && right->isTwoAddress) {
595 SDNode *DUNode = right->Node->getOperand(0).Val;
596 if (DUNode->isOperand(left->Node))
// Main comparison: Sethi-Ullman number plus bonus, then height, depth, and
// cycle bound as successive tie breakers.
601 if (LPriority+LBonus < RPriority+RBonus)
603 else if (LPriority+LBonus == RPriority+RBonus)
604 if (left->Height > right->Height)
606 else if (left->Height == right->Height)
607 if (left->Depth < right->Depth)
609 else if (left->Depth == right->Depth)
610 if (left->CycleBound > right->CycleBound)
615 static inline bool isCopyFromLiveIn(const SUnit *SU) {
616 SDNode *N = SU->Node;
617 return N->getOpcode() == ISD::CopyFromReg &&
618 N->getOperand(N->getNumOperands()-1).getValueType() != MVT::Flag;
621 // FIXME: This is probably too slow!
622 static void isReachable(SUnit *SU, SUnit *TargetSU,
623 std::set<SUnit *> &Visited, bool &Reached) {
625 if (SU == TargetSU) {
629 if (!Visited.insert(SU).second) return;
631 for (std::set<std::pair<SUnit*, bool> >::iterator I = SU->Preds.begin(),
632 E = SU->Preds.end(); I != E; ++I)
633 isReachable(I->first, TargetSU, Visited, Reached);
636 static bool isReachable(SUnit *SU, SUnit *TargetSU) {
637 std::set<SUnit *> Visited;
638 bool Reached = false;
639 isReachable(SU, TargetSU, Visited, Reached);
/// getDefUsePredecessor - Find the SUnit that produces SU's first operand
/// (the def&use operand of a two-address node), skipping chain edges.
643 static SUnit *getDefUsePredecessor(SUnit *SU) {
644 SDNode *DU = SU->Node->getOperand(0).Val;
645 for (std::set<std::pair<SUnit*, bool> >::iterator
646 I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) {
647 if (I->second) continue; // ignore chain preds
648 SUnit *PredSU = I->first;
// The predecessor whose node defines operand 0 is the def&use producer.
649 if (PredSU->Node == DU)
657 static bool canClobber(SUnit *SU, SUnit *Op) {
658 if (SU->isTwoAddress)
659 return Op == getDefUsePredecessor(SU);
663 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
664 /// it as a def&use operand. Add a pseudo control edge from it to the other
665 /// node (if it won't create a cycle) so the two-address one will be scheduled
666 /// first (lower in the schedule).
668 void BURegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
669 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
// Cast away constness: this pass mutates Preds/Succs of the stored units.
670 SUnit *SU = (SUnit *)&((*SUnits)[i]);
671 SDNode *Node = SU->Node;
672 if (!Node->isTargetOpcode())
675 if (SU->isTwoAddress) {
676 SUnit *DUSU = getDefUsePredecessor(SU);
// Examine the other (non-chain) users of the def&use value.
679 for (std::set<std::pair<SUnit*, bool> >::iterator I = DUSU->Succs.begin(),
680 E = DUSU->Succs.end(); I != E; ++I) {
681 if (I->second) continue;
682 SUnit *SuccSU = I->first;
684 (!canClobber(SuccSU, DUSU) ||
685 (!SU->isCommutable && SuccSU->isCommutable))){
// Only add the edge if it cannot introduce a cycle: same depth and the
// other user cannot already reach SU through predecessor edges.
686 if (SuccSU->Depth == SU->Depth && !isReachable(SuccSU, SU)) {
687 DEBUG(std::cerr << "Adding an edge from SU # " << SU->NodeNum
688 << " to SU #" << SuccSU->NodeNum << "\n");
// Insert as chain (pseudo) edges; the Left counts track outstanding edges,
// and the set insert's .second guards against double-counting duplicates.
689 if (SU->Preds.insert(std::make_pair(SuccSU, true)).second)
690 SU->NumChainPredsLeft++;
691 if (SuccSU->Succs.insert(std::make_pair(SU, true)).second)
692 SuccSU->NumChainSuccsLeft++;
700 /// CalcNodePriority - Priority is the Sethi Ullman number.
701 /// Smaller number is the higher priority.
// NOTE(review): a template<class SF> header is expected immediately above
// this member definition -- confirm.
703 int BURegReductionPriorityQueue<SF>::CalcNodePriority(const SUnit *SU) {
// Memoized: 0 means "not yet computed".
704 int &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
705 if (SethiUllmanNumber != 0)
706 return SethiUllmanNumber;
708 unsigned Opc = SU->Node->getOpcode();
// TokenFactor/CopyToReg get a huge number so they sort to the very end of
// the bottom-up order (i.e. scheduled first from the bottom).
709 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
710 SethiUllmanNumber = INT_MAX - 10;
711 else if (SU->NumSuccsLeft == 0)
712 // If SU does not have a use, i.e. it doesn't produce a value that would
713 // be consumed (e.g. store), then it terminates a chain of computation.
714 // Give it a small SethiUllman number so it will be scheduled right before
715 // its predecessors, so that it doesn't lengthen their live ranges.
716 SethiUllmanNumber = INT_MIN + 10;
717 // FIXME: remove this else if? It seems to reduce register spills but often
718 // ends up increasing runtime. Need to investigate.
719 else if (SU->NumPredsLeft == 0 &&
720 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
721 SethiUllmanNumber = INT_MAX - 10;
// Classic Sethi-Ullman recurrence over non-chain predecessors: take the
// max child number, bumping it when children tie.
724 for (std::set<std::pair<SUnit*, bool> >::const_iterator
725 I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) {
726 if (I->second) continue; // ignore chain preds
727 SUnit *PredSU = I->first;
728 int PredSethiUllman = CalcNodePriority(PredSU);
729 if (PredSethiUllman > SethiUllmanNumber) {
730 SethiUllmanNumber = PredSethiUllman;
// NOTE(review): Extra is accumulated for ties and added below; its
// declaration/reset is expected around this loop -- confirm.
732 } else if (PredSethiUllman == SethiUllmanNumber && !I->second)
736 SethiUllmanNumber += Extra;
739 return SethiUllmanNumber;
742 /// CalculatePriorities - Calculate priorities of all scheduling units.
// NOTE(review): template<class SF> header expected immediately above -- confirm.
744 void BURegReductionPriorityQueue<SF>::CalculatePriorities() {
// 0 marks "unvisited" for the memoization inside CalcNodePriority.
745 SethiUllmanNumbers.assign(SUnits->size(), 0);
747 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
748 CalcNodePriority(&(*SUnits)[i]);
/// SumOfUnscheduledPredsOfSuccs - Count, across all successors of SU, the
/// predecessors that have not been scheduled yet.  Used by the top-down
/// comparator as a cheap "how close is this node to unblocking a successor"
/// heuristic.
751 static unsigned SumOfUnscheduledPredsOfSuccs(const SUnit *SU) {
753 for (std::set<std::pair<SUnit*, bool> >::const_iterator
754 I = SU->Succs.begin(), E = SU->Succs.end(); I != E; ++I) {
755 SUnit *SuccSU = I->first;
756 for (std::set<std::pair<SUnit*, bool> >::const_iterator
757 II = SuccSU->Preds.begin(), EE = SuccSU->Preds.end(); II != EE; ++II) {
758 SUnit *PredSU = II->first;
759 if (!PredSU->isScheduled)
/// td_ls_rr_sort::operator() - Top-down priority predicate for the heap:
/// returns true when 'right' should be scheduled before 'left'.
769 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
770 unsigned LeftNum = left->NodeNum;
771 unsigned RightNum = right->NodeNum;
772 int LPriority = SPQ->getSethiUllmanNumber(LeftNum);
773 int RPriority = SPQ->getSethiUllmanNumber(RightNum);
774 bool LIsTarget = left->Node->isTargetOpcode();
775 bool RIsTarget = right->Node->isTargetOpcode();
// A top-down "floater" is a target node with no predecessors at all.
776 bool LIsFloater = LIsTarget && left->NumPreds == 0;
777 bool RIsFloater = RIsTarget && right->NumPreds == 0;
// Bonus for nodes that are the sole unscheduled predecessor of a successor.
778 unsigned LBonus = (SumOfUnscheduledPredsOfSuccs(left) == 1) ? 2 : 0;
779 unsigned RBonus = (SumOfUnscheduledPredsOfSuccs(right) == 1) ? 2 : 0;
// Nodes with no successors go last in the top-down order.
781 if (left->NumSuccs == 0 && right->NumSuccs != 0)
783 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
786 // Special tie breaker: if two nodes share an operand, the one that uses it
787 // as a def&use operand is preferred.
788 if (LIsTarget && RIsTarget) {
789 if (left->isTwoAddress && !right->isTwoAddress) {
790 SDNode *DUNode = left->Node->getOperand(0).Val;
791 if (DUNode->isOperand(right->Node))
794 if (!left->isTwoAddress && right->isTwoAddress) {
795 SDNode *DUNode = right->Node->getOperand(0).Val;
796 if (DUNode->isOperand(left->Node))
// Single-successor nodes get a further adjustment before comparing.
804 if (left->NumSuccs == 1)
806 if (right->NumSuccs == 1)
809 if (LPriority+LBonus < RPriority+RBonus)
// NOTE(review): the comparison above includes the bonuses but this equality
// check omits them -- inconsistent with the bottom-up version; confirm
// whether that is intentional.
811 else if (LPriority == RPriority)
812 if (left->Depth < right->Depth)
814 else if (left->Depth == right->Depth)
815 if (left->NumSuccsLeft > right->NumSuccsLeft)
817 else if (left->NumSuccsLeft == right->NumSuccsLeft)
818 if (left->CycleBound > right->CycleBound)
823 /// CalcNodePriority - Priority is the Sethi Ullman number.
824 /// Smaller number is the higher priority.
// NOTE(review): a template<class SF> header is expected immediately above
// this member definition -- confirm.
826 int TDRegReductionPriorityQueue<SF>::CalcNodePriority(const SUnit *SU) {
// Memoized: 0 means "not yet computed".
827 int &SethiUllmanNumber = SethiUllmanNumbers[SU->NodeNum];
828 if (SethiUllmanNumber != 0)
829 return SethiUllmanNumber;
831 unsigned Opc = SU->Node->getOpcode();
832 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
833 SethiUllmanNumber = INT_MAX - 10;
834 else if (SU->NumSuccsLeft == 0)
835 // If SU does not have a use, i.e. it doesn't produce a value that would
836 // be consumed (e.g. store), then it terminates a chain of computation.
837 // Give it a small SethiUllman number so it will be scheduled right before
838 // its predecessors, so that it doesn't lengthen their live ranges.
839 SethiUllmanNumber = INT_MIN + 10;
// Note: unlike the bottom-up version, predecessor-free nodes get 1 here.
840 else if (SU->NumPredsLeft == 0 &&
841 (Opc != ISD::CopyFromReg || isCopyFromLiveIn(SU)))
842 SethiUllmanNumber = 1;
// Classic Sethi-Ullman recurrence over non-chain predecessors: take the
// max child number, bumping it when children tie.
845 for (std::set<std::pair<SUnit*, bool> >::const_iterator
846 I = SU->Preds.begin(), E = SU->Preds.end(); I != E; ++I) {
847 if (I->second) continue; // ignore chain preds
848 SUnit *PredSU = I->first;
849 int PredSethiUllman = CalcNodePriority(PredSU);
850 if (PredSethiUllman > SethiUllmanNumber) {
851 SethiUllmanNumber = PredSethiUllman;
// NOTE(review): Extra is accumulated for ties and added below; its
// declaration/reset is expected around this loop -- confirm.
853 } else if (PredSethiUllman == SethiUllmanNumber && !I->second)
857 SethiUllmanNumber += Extra;
860 return SethiUllmanNumber;
863 /// CalculatePriorities - Calculate priorities of all scheduling units.
// NOTE(review): template<class SF> header expected immediately above -- confirm.
865 void TDRegReductionPriorityQueue<SF>::CalculatePriorities() {
// 0 marks "unvisited" for the memoization inside CalcNodePriority.
866 SethiUllmanNumbers.assign(SUnits->size(), 0);
868 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
869 CalcNodePriority(&(*SUnits)[i]);
872 //===----------------------------------------------------------------------===//
873 // Public Constructor Functions
874 //===----------------------------------------------------------------------===//
876 llvm::ScheduleDAG* llvm::createBURRListDAGScheduler(SelectionDAG &DAG,
877 MachineBasicBlock *BB) {
878 return new ScheduleDAGRRList(DAG, BB, DAG.getTarget(), true,
879 new BURegReductionPriorityQueue<bu_ls_rr_sort>());
882 llvm::ScheduleDAG* llvm::createTDRRListDAGScheduler(SelectionDAG &DAG,
883 MachineBasicBlock *BB) {
884 return new ScheduleDAGRRList(DAG, BB, DAG.getTarget(), false,
885 new TDRegReductionPriorityQueue<td_ls_rr_sort>());