//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
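//
// At its core, each scheduling pass below is a loop of the following shape
// (a simplified sketch; the real bottom-up loop also handles live physical
// register dependencies, backtracking, and node cloning):
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = AvailableQueue->pop();  // highest-priority available node
//     ScheduleNode(SU, CurCycle);         // emit it, release its neighbors
//     ++CurCycle;
//   }
//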
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/PriorityQueue.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
namespace {
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false
  /// if it is top-down.
  bool isBottomUp;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other node that
  /// modifies the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<unsigned> LiveRegCycles;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf,
                    bool isbottomup,
                    SchedulingPriorityQueue *availqueue)
    : ScheduleDAGSDNodes(mf), isBottomUp(isbottomup),
      AvailableQueue(availqueue), Topo(SUnits) {
  }

  ~ScheduleDAGRRList() {
    delete AvailableQueue;
  }

  void Schedule();
  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
  /// will create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }
private:
  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU, unsigned CurCycle);
  void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void CapturePred(SDep *PredEdge);
  void ScheduleNodeBottomUp(SUnit*, unsigned);
  void ScheduleNodeTopDown(SUnit*, unsigned);
  void UnscheduleNodeBottomUp(SUnit*);
  void BacktrackBottomUp(SUnit*, unsigned, unsigned&);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
  void ListScheduleTopDown();
  void ListScheduleBottomUp();
public:
  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Return true, since register-pressure-reducing
  /// scheduling doesn't need actual latency information.
  bool ForceUnitLatencies() const { return true; }
};
} // end anonymous namespace
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegCycles.resize(TRI->getNumRegs(), 0);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

  AvailableQueue->releaseState();
}
//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif

  --PredSU->NumSuccsLeft;

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;
    AvailableQueue->push(PredSU);
  }
}
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU, unsigned CurCycle) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // the use.
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegDefs[I->getReg()] = I->getSUnit();
        LiveRegCycles[I->getReg()] = CurCycle;
      }
    }
  }
}
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it
/// to the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getHeight() && "Node scheduled below its height!");
  SU->setHeightToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleasePredecessors(SU, CurCycle);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (LiveRegCycles[I->getReg()] == I->getSUnit()->getHeight()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        assert(LiveRegDefs[I->getReg()] == SU &&
               "Physical register dependency violated?");
        --NumLiveRegs;
        LiveRegDefs[I->getReg()] = NULL;
        LiveRegCycles[I->getReg()] = 0;
      }
    }
  }

  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succs-left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}
/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update its
/// and its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(errs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  AvailableQueue->UnscheduledNode(SU);

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU->getHeight() == LiveRegCycles[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegCycles[I->getReg()] = 0;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()]) {
        LiveRegDefs[I->getReg()] = SU;
        ++NumLiveRegs;
      }
      if (I->getSUnit()->getHeight() < LiveRegCycles[I->getReg()])
        LiveRegCycles[I->getReg()] = I->getSUnit()->getHeight();
    }
  }

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  AvailableQueue->push(SU);
}
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
/// BtCycle in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, unsigned BtCycle,
                                          unsigned &CurCycle) {
  SUnit *OldSU = NULL;
  while (CurCycle > BtCycle) {
    OldSU = Sequence.back();
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    UnscheduleNodeBottomUp(OldSU);
    --CurCycle;
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  ++NumBacktracks;
}
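
// Illustration (hypothetical schedule, not from any real test case): if
// Sequence holds [A, B, C, D] with CurCycle at 4 and BtCycle is 2, then D
// and C are unscheduled in that order, their predecessors are re-captured,
// and CurCycle drops back to 2 so the caller can schedule the node that
// needs the live physical register ahead of them.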
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  if (SU->getNode()->getFlaggedNode())
    return NULL;

  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Flag)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Flag)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(errs() << "Unfolding SU # " << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location that produces the same type of value
    // but with different alignment or volatileness.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (I->getSUnit()->getNode() &&
               I->getSUnit()->getNode()->isOperandOf(LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(errs() << "Duplicating SU # " << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                             const TargetRegisterClass *DestRC,
                                             const TargetRegisterClass *SrcRC,
                                             SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}
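
// The resulting graph, sketched for a hypothetical SU with scheduled
// successors S1 and S2:
//
//   SU --(Reg)--> CopyFromSU --> CopyToSU --> S1, S2
//
// CopyFromSU moves the physical register value into the cross-copy class
// DestRC and CopyToSU moves it back, so the scheduled successors now read
// the value through the copy pair instead of keeping Reg itself live
// across them.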
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
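
// Example (hypothetical instruction description): with one explicit def and
// ImplicitDefs == { X, Y }, a query for Reg == Y walks past X, leaving
// NumRes == getNumDefs() + 1, and returns the type of that result value,
// since implicit defs follow the explicit defs in the node's value list.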
/// CheckForLiveRegDef - Return true and update the live register vector if
/// the specified register def of the specified SUnit clobbers any "live"
/// registers.
static bool CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  bool Added = false;
  if (LiveRegDefs[Reg] && LiveRegDefs[Reg] != SU) {
    if (RegAdded.insert(Reg)) {
      LRegs.push_back(Reg);
      Added = true;
    }
  }
  for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias)
    if (LiveRegDefs[*Alias] && LiveRegDefs[*Alias] != SU) {
      if (RegAdded.insert(*Alias)) {
        LRegs.push_back(*Alias);
        Added = true;
      }
    }
  return Added;
}
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register
/// dependencies. If the specific node is the last one that's available to
/// schedule, do whatever is necessary (i.e. backtracking or cloning) to make
/// it possible.
bool ScheduleDAGRRList::DelayForLiveRegsBottomUp(SUnit *SU,
                                                 SmallVector<unsigned, 4> &LRegs){
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep())
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getFlaggedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Flag)
        --NumOps;  // Ignore the flag operand.

      for (unsigned i = 2; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = (Flags & 0xffff) >> 3;

        ++i; // Skip the ID value.
        if ((Flags & 7) == 2 || (Flags & 7) == 6) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }
  return !LRegs.empty();
}
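
// Inline asm operand flag words, as decoded above: the low 3 bits hold the
// operand kind (2 = register def, 6 = earlyclobber register def) and
// (Flags & 0xffff) >> 3 holds the number of register operands that follow.
// For example, a flag word of 0x0A encodes a register def (kind 2) followed
// by one register operand.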
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  unsigned CurCycle = 0;

  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU, CurCycle);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  SmallVector<SUnit*, 4> NotReady;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    bool Delayed = false;
    LRegsMap.clear();
    SUnit *CurSU = AvailableQueue->pop();
    while (CurSU) {
      SmallVector<unsigned, 4> LRegs;
      if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
        break;
      Delayed = true;
      LRegsMap.insert(std::make_pair(CurSU, LRegs));

      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      NotReady.push_back(CurSU);
      CurSU = AvailableQueue->pop();
    }

    // All candidates are delayed due to live physical reg dependencies.
    // Try backtracking, code duplication, or inserting cross class copies
    // to break the cycle.
    if (Delayed && !CurSU) {
      for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
        SUnit *TrySU = NotReady[i];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

        // Try unscheduling up to the point where it's safe to schedule
        // this node.
        unsigned LiveCycle = CurCycle;
        for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
          unsigned Reg = LRegs[j];
          unsigned LCycle = LiveRegCycles[Reg];
          LiveCycle = std::min(LiveCycle, LCycle);
        }
        SUnit *OldSU = Sequence[LiveCycle];
        if (!WillCreateCycle(TrySU, OldSU)) {
          BacktrackBottomUp(TrySU, LiveCycle, CurCycle);
          // Force the current node to be scheduled before the node that
          // requires the physical reg dep.
          if (OldSU->isAvailable) {
            OldSU->isAvailable = false;
            AvailableQueue->remove(OldSU);
          }
          AddPred(TrySU, SDep(OldSU, SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false, /*isArtificial=*/true));
          // If one or more successors has been unscheduled, then the current
          // node is no longer available. Schedule a successor that's now
          // available instead.
          if (!TrySU->isAvailable)
            CurSU = AvailableQueue->pop();
          else {
            CurSU = TrySU;
            TrySU->isPending = false;
            NotReady.erase(NotReady.begin()+i);
          }
          break;
        }
      }

      if (!CurSU) {
        // Can't backtrack. If it's too expensive to copy the value, then try
        // to duplicate the nodes that produce these "too expensive to copy"
        // values to break the dependency. In case even that doesn't work,
        // insert cross class copies.
        // If it's not too expensive, i.e. cost != -1, issue copies.
        SUnit *TrySU = NotReady[0];
        SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
        assert(LRegs.size() == 1 && "Can't handle this yet!");
        unsigned Reg = LRegs[0];
        SUnit *LRDef = LiveRegDefs[Reg];
        EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
        const TargetRegisterClass *RC =
          TRI->getPhysicalRegisterRegClass(Reg, VT);
        const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

        // If cross copy register class is null, then it must be possible to
        // copy the value directly. Do not try to duplicate the def.
        SUnit *NewDef = 0;
        if (DestRC)
          NewDef = CopyAndMoveSuccessors(LRDef);
        else
          DestRC = RC;
        if (!NewDef) {
          // Issue copies, these can be expensive cross register class copies.
          SmallVector<SUnit*, 2> Copies;
          InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
          DEBUG(errs() << "Adding an edge from SU #" << TrySU->NodeNum
                       << " to SU #" << Copies.front()->NodeNum << "\n");
          AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                              /*Reg=*/0, /*isNormalMemory=*/false,
                              /*isMustAlias=*/false,
                              /*isArtificial=*/true));
          NewDef = Copies.back();
        }

        DEBUG(errs() << "Adding an edge from SU #" << NewDef->NodeNum
                     << " to SU #" << TrySU->NodeNum << "\n");
        LiveRegDefs[Reg] = NewDef;
        AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                             /*Reg=*/0, /*isNormalMemory=*/false,
                             /*isMustAlias=*/false,
                             /*isArtificial=*/true));
        TrySU->isAvailable = false;
        CurSU = NewDef;
      }

      assert(CurSU && "Unable to resolve live physical register dependencies!");
    }

    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = NotReady.size(); i != e; ++i) {
      NotReady[i]->isPending = false;
      // May no longer be available due to backtracking.
      if (NotReady[i]->isAvailable)
        AvailableQueue->push(NotReady[i]);
    }
    NotReady.clear();

    if (CurSU)
      ScheduleNodeBottomUp(CurSU, CurCycle);
    ++CurCycle;
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}
//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif

  --SuccSU->NumPredsLeft;

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}
void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
  // Top down: release successors
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    assert(!I->isAssignedRegDep() &&
           "The list-tdrr scheduler doesn't yet support physreg dependencies!");

    ReleaseSucc(SU, &*I);
  }
}
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();

    if (CurSU)
      ScheduleNodeTopDown(CurSU, CurCycle);
    ++CurCycle;
  }

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}
//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Implementation
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
// to reduce register pressure.
//
namespace {
  template<class SF>
  class RegReductionPriorityQueue;

  /// Sorting functions for the Available queue.
  struct bu_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<bu_ls_rr_sort> *SPQ;
    bu_ls_rr_sort(RegReductionPriorityQueue<bu_ls_rr_sort> *spq) : SPQ(spq) {}
    bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };

  struct td_ls_rr_sort : public std::binary_function<SUnit*, SUnit*, bool> {
    RegReductionPriorityQueue<td_ls_rr_sort> *SPQ;
    td_ls_rr_sort(RegReductionPriorityQueue<td_ls_rr_sort> *spq) : SPQ(spq) {}
    td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

    bool operator()(const SUnit* left, const SUnit* right) const;
  };
}  // end anonymous namespace
/// CalcNodeSethiUllmanNumber - Compute Sethi-Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
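
// Worked example (hypothetical DAG, for illustration only): for a node c
// whose two data predecessors a and b are both leaves, a and b each get
// number 1 (the initial 0 is bumped to 1), and c gets max(1,1) plus one
// Extra for the tie, i.e. 2 -- evaluating c needs a register to hold a
// while b is computed. A node with predecessors numbered 2 and 1 stays at
// 2, since the cheaper operand can be evaluated without holding an extra
// value live.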
namespace {
  template<class SF>
  class RegReductionPriorityQueue : public SchedulingPriorityQueue {
    PriorityQueue<SUnit*, std::vector<SUnit*>, SF> Queue;
    unsigned currentQueueId;

  protected:
    // SUnits - The SUnits for the current graph.
    std::vector<SUnit> *SUnits;

    const TargetInstrInfo *TII;
    const TargetRegisterInfo *TRI;
    ScheduleDAGRRList *scheduleDAG;

    // SethiUllmanNumbers - The SethiUllman number for each node.
    std::vector<unsigned> SethiUllmanNumbers;

  public:
    RegReductionPriorityQueue(const TargetInstrInfo *tii,
                              const TargetRegisterInfo *tri)
      : Queue(SF(this)), currentQueueId(0),
        TII(tii), TRI(tri), scheduleDAG(NULL) {}

    void initNodes(std::vector<SUnit> &sunits) {
      SUnits = &sunits;
      // Add pseudo dependency edges for two-address nodes.
      AddPseudoTwoAddrDeps();
      // Reroute edges to nodes with multiple uses.
      PrescheduleNodesWithMultipleUses();
      // Calculate node priorities.
      CalculateSethiUllmanNumbers();
    }

    void addNode(const SUnit *SU) {
      unsigned SUSize = SethiUllmanNumbers.size();
      if (SUnits->size() > SUSize)
        SethiUllmanNumbers.resize(SUSize*2, 0);
      CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void updateNode(const SUnit *SU) {
      SethiUllmanNumbers[SU->NodeNum] = 0;
      CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
    }

    void releaseState() {
      SUnits = 0;
      SethiUllmanNumbers.clear();
    }
    unsigned getNodePriority(const SUnit *SU) const {
      assert(SU->NodeNum < SethiUllmanNumbers.size());
      unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
      if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
        // CopyToReg should be close to its uses to facilitate coalescing and
        // avoid spilling.
        return 0;
      if (Opc == TargetInstrInfo::EXTRACT_SUBREG ||
          Opc == TargetInstrInfo::SUBREG_TO_REG ||
          Opc == TargetInstrInfo::INSERT_SUBREG)
        // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
        // close to their uses to facilitate coalescing.
        return 0;
      if (SU->NumSuccs == 0 && SU->NumPreds != 0)
        // If SU does not have a register use, i.e. it doesn't produce a value
        // that would be consumed (e.g. store), then it terminates a chain of
        // computation. Give it a large SethiUllman number so it will be
        // scheduled right before its predecessors and won't lengthen their
        // live ranges.
        return 0xffff;
      if (SU->NumPreds == 0 && SU->NumSuccs != 0)
        // If SU does not have a register def, schedule it close to its uses
        // because it does not lengthen any live ranges.
        return 0;
      return SethiUllmanNumbers[SU->NodeNum];
    }
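
    // Summary of the special cases above: copy- and subreg-related nodes
    // return 0 (the highest priority, since smaller numbers rank higher) so
    // they stay next to their uses; chain terminators such as stores return
    // 0xffff so they are popped last and land adjacent to the nodes that
    // feed them; all remaining nodes are ranked by Sethi-Ullman number.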
    unsigned size() const { return Queue.size(); }

    bool empty() const { return Queue.empty(); }

    void push(SUnit *U) {
      assert(!U->NodeQueueId && "Node in the queue already");
      U->NodeQueueId = ++currentQueueId;
      Queue.push(U);
    }

    void push_all(const std::vector<SUnit *> &Nodes) {
      for (unsigned i = 0, e = Nodes.size(); i != e; ++i)
        push(Nodes[i]);
    }

    SUnit *pop() {
      if (empty()) return NULL;
      SUnit *V = Queue.top();
      Queue.pop();
      V->NodeQueueId = 0;
      return V;
    }

    void remove(SUnit *SU) {
      assert(!Queue.empty() && "Queue is empty!");
      assert(SU->NodeQueueId != 0 && "Not in queue!");
      Queue.erase_one(SU);
      SU->NodeQueueId = 0;
    }

    void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
      scheduleDAG = scheduleDag;
    }

  protected:
    bool canClobber(const SUnit *SU, const SUnit *Op);
    void AddPseudoTwoAddrDeps();
    void PrescheduleNodesWithMultipleUses();
    void CalculateSethiUllmanNumbers();
  };

  typedef RegReductionPriorityQueue<bu_ls_rr_sort>
    BURegReductionPriorityQueue;

  typedef RegReductionPriorityQueue<td_ls_rr_sort>
    TDRegReductionPriorityQueue;
}  // end anonymous namespace
/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be
    // considered to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}
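
// Note: when a successor is a CopyToReg, the recursion above looks through
// it (adding one per copy), so a stack of CopyToRegs is treated as sitting
// at the position of the value's eventual use rather than at the copies
// themselves.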
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}
// Bottom up
bool bu_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  if (LPriority != RPriority)
    return LPriority > RPriority;

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t2 = op c3
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  if (left->getHeight() != right->getHeight())
    return left->getHeight() > right->getHeight();

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
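
// The tie-breakers above are tried in order: Sethi-Ullman priority, distance
// to the closest use, number of operand registers made live, height, depth,
// and finally queue id. The queue-id comparison makes this a strict weak
// ordering in which no two distinct queued nodes compare equal; since ids
// grow as nodes are pushed, the node queued earlier wins among otherwise
// identical candidates.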
template<class SF>
bool
RegReductionPriorityQueue<SF>::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
/// hasCopyToRegUse - Return true if SU has a value successor that is a
/// CopyToReg node.
static bool hasCopyToRegUse(const SUnit *SU) {
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg)
      return true;
  }
  return false;
}
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getFlaggedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const unsigned *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    if (!SUImpDefs)
      return false;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Flag || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      for (;*SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}
/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///      N
///    / |
///   /  |
///  U  store
///  |
/// ...
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///      N
///      ||
///      ||
///     store
///       |
///       U
///       |
///      ...
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
template<class SF>
void RegReductionPriorityQueue<SF>::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
         EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires
    // additional support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't
    // behave like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
         EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(errs() << "Prescheduling SU # " << SU->NodeNum
                 << " next to PredSU # " << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
template<class SF>
void RegReductionPriorityQueue<SF>::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getFlaggedNode())
      continue;

    unsigned Opc = Node->getMachineOpcode();
    const TargetInstrDesc &TID = TII->get(Opc);
    unsigned NumRes = TID.getNumDefs();
    unsigned NumOps = TID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetInstrInfo::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetInstrInfo::EXTRACT_SUBREG ||
            SuccOpc == TargetInstrInfo::INSERT_SUBREG ||
            SuccOpc == TargetInstrInfo::SUBREG_TO_REG)
          continue;
        if ((!canClobber(SuccSU, DUSU) ||
             (hasCopyToRegUse(SU) && !hasCopyToRegUse(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(errs() << "Adding a pseudo-two-addr edge from SU # "
                       << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
template<class SF>
void RegReductionPriorityQueue<SF>::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}
/// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
/// predecessors of the successors of the SUnit SU. Stop when the provided
/// limit is exceeded.
static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
                                                    unsigned Limit) {
  unsigned Sum = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    const SUnit *SuccSU = I->getSUnit();
    for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
         EE = SuccSU->Preds.end(); II != EE; ++II) {
      SUnit *PredSU = II->getSUnit();
      if (!PredSU->isScheduled)
        if (++Sum > Limit)
          return Sum;
    }
  }
  return Sum;
}
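
// The td_ls_rr_sort comparator below calls this with Limit == 1: a result of
// exactly 1 means SU itself is the only unscheduled predecessor of its
// successors, i.e. scheduling SU immediately unblocks them, which earns the
// node a small priority bonus.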
// Top down
bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);
  bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
  bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
  bool LIsFloater = LIsTarget && left->NumPreds == 0;
  bool RIsFloater = RIsTarget && right->NumPreds == 0;
  unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
  unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;

  if (left->NumSuccs == 0 && right->NumSuccs != 0)
    return false;
  else if (left->NumSuccs != 0 && right->NumSuccs == 0)
    return true;

  if (LIsFloater)
    LBonus -= 2;
  if (RIsFloater)
    RBonus -= 2;
  if (left->NumSuccs == 1)
    LBonus += 2;
  if (right->NumSuccs == 1)
    RBonus += 2;

  if (LPriority+LBonus != RPriority+RBonus)
    return LPriority+LBonus < RPriority+RBonus;

  if (left->getDepth() != right->getDepth())
    return left->getDepth() < right->getDepth();

  if (left->NumSuccsLeft != right->NumSuccsLeft)
    return left->NumSuccsLeft > right->NumSuccsLeft;

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ = new BURegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(*IS->MF, true, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}
llvm::ScheduleDAGSDNodes *
llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS, CodeGenOpt::Level) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  TDRegReductionPriorityQueue *PQ = new TDRegReductionPriorityQueue(TII, TRI);

  ScheduleDAGRRList *SD =
    new ScheduleDAGRRList(*IS->MF, false, PQ);
  PQ->setScheduleDAG(SD);
  return SD;
}