//===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the ScheduleDAG class, which is used as the common
// base class for instruction schedulers. This encapsulates the scheduling DAG,
// which is shared between SelectionDAG and MachineInstr scheduling.
//
//===----------------------------------------------------------------------===//
16 #ifndef LLVM_CODEGEN_SCHEDULEDAG_H
17 #define LLVM_CODEGEN_SCHEDULEDAG_H
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/GraphTraits.h"
21 #include "llvm/ADT/PointerIntPair.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/Target/TargetLowering.h"
29 class MachineConstantPool;
30 class MachineFunction;
31 class MachineRegisterInfo;
33 struct MCSchedClassDesc;
34 class TargetRegisterInfo;
37 class TargetInstrInfo;
40 class TargetRegisterClass;
41 template<class Graph> class GraphWriter;
43 /// SDep - Scheduling dependency. This represents one direction of an
44 /// edge in the scheduling DAG.
47 /// Kind - These are the different kinds of scheduling dependencies.
49 Data, ///< Regular data dependence (aka true-dependence).
50 Anti, ///< A register anti-dependedence (aka WAR).
51 Output, ///< A register output-dependence (aka WAW).
52 Order ///< Any other ordering dependency.
55 // Strong dependencies must be respected by the scheduler. Artificial
56 // dependencies may be removed only if they are redundant with another
59 // Weak dependencies may be violated by the scheduling strategy, but only if
60 // the strategy can prove it is correct to do so.
62 // Strong OrderKinds must occur before "Weak".
63 // Weak OrderKinds must occur after "Weak".
65 Barrier, ///< An unknown scheduling barrier.
66 MayAliasMem, ///< Nonvolatile load/Store instructions that may alias.
67 MustAliasMem, ///< Nonvolatile load/Store instructions that must alias.
68 Artificial, ///< Arbitrary strong DAG edge (no real dependence).
69 Weak, ///< Arbitrary weak DAG edge.
70 Cluster ///< Weak DAG edge linking a chain of clustered instrs.
74 /// Dep - A pointer to the depending/depended-on SUnit, and an enum
75 /// indicating the kind of the dependency.
76 PointerIntPair<SUnit *, 2, Kind> Dep;
78 /// Contents - A union discriminated by the dependence kind.
80 /// Reg - For Data, Anti, and Output dependencies, the associated
81 /// register. For Data dependencies that don't currently have a register
82 /// assigned, this is set to zero.
85 /// Order - Additional information about Order dependencies.
86 unsigned OrdKind; // enum OrderKind
89 /// Latency - The time associated with this edge. Often this is just
90 /// the value of the Latency field of the predecessor, however advanced
91 /// models may provide additional information about specific edges.
95 /// SDep - Construct a null SDep. This is only for use by container
96 /// classes which require default constructors. SUnits may not
97 /// have null SDep edges.
98 SDep() : Dep(nullptr, Data) {}
100 /// SDep - Construct an SDep with the specified values.
101 SDep(SUnit *S, Kind kind, unsigned Reg)
102 : Dep(S, kind), Contents() {
105 llvm_unreachable("Reg given for non-register dependence!");
109 "SDep::Anti and SDep::Output must use a non-zero Reg!");
119 SDep(SUnit *S, OrderKind kind)
120 : Dep(S, Order), Contents(), Latency(0) {
121 Contents.OrdKind = kind;
124 /// Return true if the specified SDep is equivalent except for latency.
125 bool overlaps(const SDep &Other) const;
127 bool operator==(const SDep &Other) const {
128 return overlaps(Other) && Latency == Other.Latency;
131 bool operator!=(const SDep &Other) const {
132 return !operator==(Other);
135 /// getLatency - Return the latency value for this edge, which roughly
136 /// means the minimum number of cycles that must elapse between the
137 /// predecessor and the successor, given that they have this edge
139 unsigned getLatency() const {
143 /// setLatency - Set the latency for this edge.
144 void setLatency(unsigned Lat) {
148 //// getSUnit - Return the SUnit to which this edge points.
149 SUnit *getSUnit() const;
151 //// setSUnit - Assign the SUnit to which this edge points.
152 void setSUnit(SUnit *SU);
154 /// getKind - Return an enum value representing the kind of the dependence.
155 Kind getKind() const;
157 /// isCtrl - Shorthand for getKind() != SDep::Data.
158 bool isCtrl() const {
159 return getKind() != Data;
162 /// isNormalMemory - Test if this is an Order dependence between two
163 /// memory accesses where both sides of the dependence access memory
164 /// in non-volatile and fully modeled ways.
165 bool isNormalMemory() const {
166 return getKind() == Order && (Contents.OrdKind == MayAliasMem
167 || Contents.OrdKind == MustAliasMem);
170 /// isBarrier - Test if this is an Order dependence that is marked
172 bool isBarrier() const {
173 return getKind() == Order && Contents.OrdKind == Barrier;
176 /// isNormalMemoryOrBarrier - Test if this is could be any kind of memory
178 bool isNormalMemoryOrBarrier() const {
179 return (isNormalMemory() || isBarrier());
182 /// isMustAlias - Test if this is an Order dependence that is marked
183 /// as "must alias", meaning that the SUnits at either end of the edge
184 /// have a memory dependence on a known memory location.
185 bool isMustAlias() const {
186 return getKind() == Order && Contents.OrdKind == MustAliasMem;
189 /// isWeak - Test if this a weak dependence. Weak dependencies are
190 /// considered DAG edges for height computation and other heuristics, but do
191 /// not force ordering. Breaking a weak edge may require the scheduler to
192 /// compensate, for example by inserting a copy.
193 bool isWeak() const {
194 return getKind() == Order && Contents.OrdKind >= Weak;
197 /// isArtificial - Test if this is an Order dependence that is marked
198 /// as "artificial", meaning it isn't necessary for correctness.
199 bool isArtificial() const {
200 return getKind() == Order && Contents.OrdKind == Artificial;
203 /// isCluster - Test if this is an Order dependence that is marked
204 /// as "cluster", meaning it is artificial and wants to be adjacent.
205 bool isCluster() const {
206 return getKind() == Order && Contents.OrdKind == Cluster;
209 /// isAssignedRegDep - Test if this is a Data dependence that is
210 /// associated with a register.
211 bool isAssignedRegDep() const {
212 return getKind() == Data && Contents.Reg != 0;
215 /// getReg - Return the register associated with this edge. This is
216 /// only valid on Data, Anti, and Output edges. On Data edges, this
217 /// value may be zero, meaning there is no associated register.
218 unsigned getReg() const {
219 assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
220 "getReg called on non-register dependence edge!");
224 /// setReg - Assign the associated register for this edge. This is
225 /// only valid on Data, Anti, and Output edges. On Anti and Output
226 /// edges, this value must not be zero. On Data edges, the value may
227 /// be zero, which would mean that no specific register is associated
229 void setReg(unsigned Reg) {
230 assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
231 "setReg called on non-register dependence edge!");
232 assert((getKind() != Anti || Reg != 0) &&
233 "SDep::Anti edge cannot use the zero register!");
234 assert((getKind() != Output || Reg != 0) &&
235 "SDep::Output edge cannot use the zero register!");
241 struct isPodLike<SDep> { static const bool value = true; };
243 /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
246 enum : unsigned { BoundaryID = ~0u };
248 SDNode *Node; // Representative node.
249 MachineInstr *Instr; // Alternatively, a MachineInstr.
251 SUnit *OrigNode; // If not this, the node from which
252 // this node was cloned.
253 // (SD scheduling only)
255 const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.
257 // Preds/Succs - The SUnits before/after us in the graph.
258 SmallVector<SDep, 4> Preds; // All sunit predecessors.
259 SmallVector<SDep, 4> Succs; // All sunit successors.
261 typedef SmallVectorImpl<SDep>::iterator pred_iterator;
262 typedef SmallVectorImpl<SDep>::iterator succ_iterator;
263 typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
264 typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;
266 unsigned NodeNum; // Entry # of node in the node vector.
267 unsigned NodeQueueId; // Queue id of node.
268 unsigned NumPreds; // # of SDep::Data preds.
269 unsigned NumSuccs; // # of SDep::Data sucss.
270 unsigned NumPredsLeft; // # of preds not scheduled.
271 unsigned NumSuccsLeft; // # of succs not scheduled.
272 unsigned WeakPredsLeft; // # of weak preds not scheduled.
273 unsigned WeakSuccsLeft; // # of weak succs not scheduled.
274 unsigned short NumRegDefsLeft; // # of reg defs with no scheduled use.
275 unsigned short Latency; // Node latency.
276 bool isVRegCycle : 1; // May use and def the same vreg.
277 bool isCall : 1; // Is a function call.
278 bool isCallOp : 1; // Is a function call operand.
279 bool isTwoAddress : 1; // Is a two-address instruction.
280 bool isCommutable : 1; // Is a commutable instruction.
281 bool hasPhysRegUses : 1; // Has physreg uses.
282 bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
283 bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not.
284 bool isPending : 1; // True once pending.
285 bool isAvailable : 1; // True once available.
286 bool isScheduled : 1; // True once scheduled.
287 bool isScheduleHigh : 1; // True if preferable to schedule high.
288 bool isScheduleLow : 1; // True if preferable to schedule low.
289 bool isCloned : 1; // True if this node has been cloned.
290 bool isUnbuffered : 1; // Uses an unbuffered resource.
291 bool hasReservedResource : 1; // Uses a reserved resource.
292 Sched::Preference SchedulingPref; // Scheduling preference.
295 bool isDepthCurrent : 1; // True if Depth is current.
296 bool isHeightCurrent : 1; // True if Height is current.
297 unsigned Depth; // Node depth.
298 unsigned Height; // Node height.
300 unsigned TopReadyCycle; // Cycle relative to start when node is ready.
301 unsigned BotReadyCycle; // Cycle relative to end when node is ready.
303 const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
304 const TargetRegisterClass *CopySrcRC;
306 /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
307 /// an SDNode and any nodes flagged to it.
308 SUnit(SDNode *node, unsigned nodenum)
309 : Node(node), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
310 NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
311 NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
312 NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
313 isCallOp(false), isTwoAddress(false), isCommutable(false),
314 hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
315 isPending(false), isAvailable(false), isScheduled(false),
316 isScheduleHigh(false), isScheduleLow(false), isCloned(false),
317 isUnbuffered(false), hasReservedResource(false),
318 SchedulingPref(Sched::None), isDepthCurrent(false),
319 isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
320 BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
322 /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
324 SUnit(MachineInstr *instr, unsigned nodenum)
325 : Node(nullptr), Instr(instr), OrigNode(nullptr), SchedClass(nullptr),
326 NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
327 NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
328 NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
329 isCallOp(false), isTwoAddress(false), isCommutable(false),
330 hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
331 isPending(false), isAvailable(false), isScheduled(false),
332 isScheduleHigh(false), isScheduleLow(false), isCloned(false),
333 isUnbuffered(false), hasReservedResource(false),
334 SchedulingPref(Sched::None), isDepthCurrent(false),
335 isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
336 BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
338 /// SUnit - Construct a placeholder SUnit.
340 : Node(nullptr), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
341 NodeNum(BoundaryID), NodeQueueId(0), NumPreds(0), NumSuccs(0),
342 NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
343 NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
344 isCallOp(false), isTwoAddress(false), isCommutable(false),
345 hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
346 isPending(false), isAvailable(false), isScheduled(false),
347 isScheduleHigh(false), isScheduleLow(false), isCloned(false),
348 isUnbuffered(false), hasReservedResource(false),
349 SchedulingPref(Sched::None), isDepthCurrent(false),
350 isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
351 BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
353 /// \brief Boundary nodes are placeholders for the boundary of the
354 /// scheduling region.
356 /// BoundaryNodes can have DAG edges, including Data edges, but they do not
357 /// correspond to schedulable entities (e.g. instructions) and do not have a
358 /// valid ID. Consequently, always check for boundary nodes before accessing
359 /// an assoicative data structure keyed on node ID.
360 bool isBoundaryNode() const { return NodeNum == BoundaryID; }
362 /// setNode - Assign the representative SDNode for this SUnit.
363 /// This may be used during pre-regalloc scheduling.
364 void setNode(SDNode *N) {
365 assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
369 /// getNode - Return the representative SDNode for this SUnit.
370 /// This may be used during pre-regalloc scheduling.
371 SDNode *getNode() const {
372 assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
376 /// isInstr - Return true if this SUnit refers to a machine instruction as
377 /// opposed to an SDNode.
378 bool isInstr() const { return Instr; }
380 /// setInstr - Assign the instruction for the SUnit.
381 /// This may be used during post-regalloc scheduling.
382 void setInstr(MachineInstr *MI) {
383 assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
387 /// getInstr - Return the representative MachineInstr for this SUnit.
388 /// This may be used during post-regalloc scheduling.
389 MachineInstr *getInstr() const {
390 assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
394 /// addPred - This adds the specified edge as a pred of the current node if
395 /// not already. It also adds the current node as a successor of the
397 bool addPred(const SDep &D, bool Required = true);
399 /// removePred - This removes the specified edge as a pred of the current
400 /// node if it exists. It also removes the current node as a successor of
401 /// the specified node.
402 void removePred(const SDep &D);
404 /// getDepth - Return the depth of this node, which is the length of the
405 /// maximum path up to any node which has no predecessors.
406 unsigned getDepth() const {
408 const_cast<SUnit *>(this)->ComputeDepth();
412 /// getHeight - Return the height of this node, which is the length of the
413 /// maximum path down to any node which has no successors.
414 unsigned getHeight() const {
415 if (!isHeightCurrent)
416 const_cast<SUnit *>(this)->ComputeHeight();
420 /// setDepthToAtLeast - If NewDepth is greater than this node's
421 /// depth value, set it to be the new depth value. This also
422 /// recursively marks successor nodes dirty.
423 void setDepthToAtLeast(unsigned NewDepth);
425 /// setDepthToAtLeast - If NewDepth is greater than this node's
426 /// depth value, set it to be the new height value. This also
427 /// recursively marks predecessor nodes dirty.
428 void setHeightToAtLeast(unsigned NewHeight);
430 /// setDepthDirty - Set a flag in this node to indicate that its
431 /// stored Depth value will require recomputation the next time
432 /// getDepth() is called.
433 void setDepthDirty();
435 /// setHeightDirty - Set a flag in this node to indicate that its
436 /// stored Height value will require recomputation the next time
437 /// getHeight() is called.
438 void setHeightDirty();
440 /// isPred - Test if node N is a predecessor of this node.
441 bool isPred(SUnit *N) {
442 for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
443 if (Preds[i].getSUnit() == N)
448 /// isSucc - Test if node N is a successor of this node.
449 bool isSucc(SUnit *N) {
450 for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
451 if (Succs[i].getSUnit() == N)
456 bool isTopReady() const {
457 return NumPredsLeft == 0;
459 bool isBottomReady() const {
460 return NumSuccsLeft == 0;
463 /// \brief Order this node's predecessor edges such that the critical path
464 /// edge occurs first.
465 void biasCriticalPath();
467 void dump(const ScheduleDAG *G) const;
468 void dumpAll(const ScheduleDAG *G) const;
469 void print(raw_ostream &O, const ScheduleDAG *G) const;
473 void ComputeHeight();
476 /// Return true if the specified SDep is equivalent except for latency.
477 inline bool SDep::overlaps(const SDep &Other) const {
478 if (Dep != Other.Dep)
480 switch (Dep.getInt()) {
484 return Contents.Reg == Other.Contents.Reg;
486 return Contents.OrdKind == Other.Contents.OrdKind;
488 llvm_unreachable("Invalid dependency kind!");
491 //// getSUnit - Return the SUnit to which this edge points.
492 inline SUnit *SDep::getSUnit() const { return Dep.getPointer(); }
494 //// setSUnit - Assign the SUnit to which this edge points.
495 inline void SDep::setSUnit(SUnit *SU) { Dep.setPointer(SU); }
497 /// getKind - Return an enum value representing the kind of the dependence.
498 inline SDep::Kind SDep::getKind() const { return Dep.getInt(); }
500 //===--------------------------------------------------------------------===//
501 /// SchedulingPriorityQueue - This interface is used to plug different
502 /// priorities computation algorithms into the list scheduler. It implements
503 /// the interface of a standard priority queue, where nodes are inserted in
504 /// arbitrary order and returned in priority order. The computation of the
505 /// priority and the representation of the queue are totally up to the
506 /// implementation to decide.
508 class SchedulingPriorityQueue {
509 virtual void anchor();
513 SchedulingPriorityQueue(bool rf = false):
514 CurCycle(0), HasReadyFilter(rf) {}
515 virtual ~SchedulingPriorityQueue() {}
517 virtual bool isBottomUp() const = 0;
519 virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
520 virtual void addNode(const SUnit *SU) = 0;
521 virtual void updateNode(const SUnit *SU) = 0;
522 virtual void releaseState() = 0;
524 virtual bool empty() const = 0;
526 bool hasReadyFilter() const { return HasReadyFilter; }
528 virtual bool tracksRegPressure() const { return false; }
530 virtual bool isReady(SUnit *) const {
531 assert(!HasReadyFilter && "The ready filter must override isReady()");
534 virtual void push(SUnit *U) = 0;
536 void push_all(const std::vector<SUnit *> &Nodes) {
537 for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
538 E = Nodes.end(); I != E; ++I)
542 virtual SUnit *pop() = 0;
544 virtual void remove(SUnit *SU) = 0;
546 virtual void dump(ScheduleDAG *) const {}
548 /// scheduledNode - As each node is scheduled, this method is invoked. This
549 /// allows the priority function to adjust the priority of related
550 /// unscheduled nodes, for example.
552 virtual void scheduledNode(SUnit *) {}
554 virtual void unscheduledNode(SUnit *) {}
556 void setCurCycle(unsigned Cycle) {
560 unsigned getCurCycle() const {
567 const TargetMachine &TM; // Target processor
568 const TargetInstrInfo *TII; // Target instruction information
569 const TargetRegisterInfo *TRI; // Target processor register info
570 MachineFunction &MF; // Machine function
571 MachineRegisterInfo &MRI; // Virtual/real register map
572 std::vector<SUnit> SUnits; // The scheduling units.
573 SUnit EntrySU; // Special node for the region entry.
574 SUnit ExitSU; // Special node for the region exit.
577 static const bool StressSched = false;
582 explicit ScheduleDAG(MachineFunction &mf);
584 virtual ~ScheduleDAG();
586 /// clearDAG - clear the DAG state (between regions).
589 /// getInstrDesc - Return the MCInstrDesc of this SUnit.
590 /// Return NULL for SDNodes without a machine opcode.
591 const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
592 if (SU->isInstr()) return &SU->getInstr()->getDesc();
593 return getNodeDesc(SU->getNode());
596 /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
599 virtual void viewGraph(const Twine &Name, const Twine &Title);
600 virtual void viewGraph();
602 virtual void dumpNode(const SUnit *SU) const = 0;
604 /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
605 /// of the ScheduleDAG.
606 virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
608 /// getDAGLabel - Return a label for the region of code covered by the DAG.
609 virtual std::string getDAGName() const = 0;
611 /// addCustomGraphFeatures - Add custom features for a visualization of
613 virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
616 /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
617 /// their state is consistent. Return the number of scheduled SUnits.
618 unsigned VerifyScheduledDAG(bool isBottomUp);
622 // Return the MCInstrDesc of this SDNode or NULL.
623 const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
626 class SUnitIterator : public std::iterator<std::forward_iterator_tag,
631 SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
633 bool operator==(const SUnitIterator& x) const {
634 return Operand == x.Operand;
636 bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
638 pointer operator*() const {
639 return Node->Preds[Operand].getSUnit();
641 pointer operator->() const { return operator*(); }
643 SUnitIterator& operator++() { // Preincrement
647 SUnitIterator operator++(int) { // Postincrement
648 SUnitIterator tmp = *this; ++*this; return tmp;
651 static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
652 static SUnitIterator end (SUnit *N) {
653 return SUnitIterator(N, (unsigned)N->Preds.size());
656 unsigned getOperand() const { return Operand; }
657 const SUnit *getNode() const { return Node; }
658 /// isCtrlDep - Test if this is not an SDep::Data dependence.
659 bool isCtrlDep() const {
660 return getSDep().isCtrl();
662 bool isArtificialDep() const {
663 return getSDep().isArtificial();
665 const SDep &getSDep() const {
666 return Node->Preds[Operand];
670 template <> struct GraphTraits<SUnit*> {
671 typedef SUnit NodeType;
672 typedef SUnitIterator ChildIteratorType;
673 static inline NodeType *getEntryNode(SUnit *N) { return N; }
674 static inline ChildIteratorType child_begin(NodeType *N) {
675 return SUnitIterator::begin(N);
677 static inline ChildIteratorType child_end(NodeType *N) {
678 return SUnitIterator::end(N);
682 template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
683 typedef std::vector<SUnit>::iterator nodes_iterator;
684 static nodes_iterator nodes_begin(ScheduleDAG *G) {
685 return G->SUnits.begin();
687 static nodes_iterator nodes_end(ScheduleDAG *G) {
688 return G->SUnits.end();
692 /// ScheduleDAGTopologicalSort is a class that computes a topological
693 /// ordering for SUnits and provides methods for dynamically updating
694 /// the ordering as new edges are added.
696 /// This allows a very fast implementation of IsReachable, for example.
698 class ScheduleDAGTopologicalSort {
699 /// SUnits - A reference to the ScheduleDAG's SUnits.
700 std::vector<SUnit> &SUnits;
703 /// Index2Node - Maps topological index to the node number.
704 std::vector<int> Index2Node;
705 /// Node2Index - Maps the node number to its topological index.
706 std::vector<int> Node2Index;
707 /// Visited - a set of nodes visited during a DFS traversal.
710 /// DFS - make a DFS traversal and mark all nodes affected by the
711 /// edge insertion. These nodes will later get new topological indexes
712 /// by means of the Shift method.
713 void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
715 /// Shift - reassign topological indexes for the nodes in the DAG
716 /// to preserve the topological ordering.
717 void Shift(BitVector& Visited, int LowerBound, int UpperBound);
719 /// Allocate - assign the topological index to the node n.
720 void Allocate(int n, int index);
723 ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
725 /// InitDAGTopologicalSorting - create the initial topological
726 /// ordering from the DAG to be scheduled.
727 void InitDAGTopologicalSorting();
729 /// IsReachable - Checks if SU is reachable from TargetSU.
730 bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
732 /// WillCreateCycle - Return true if addPred(TargetSU, SU) creates a cycle.
733 bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
735 /// AddPred - Updates the topological ordering to accommodate an edge
736 /// to be added from SUnit X to SUnit Y.
737 void AddPred(SUnit *Y, SUnit *X);
739 /// RemovePred - Updates the topological ordering to accommodate an
740 /// an edge to be removed from the specified node N from the predecessors
741 /// of the current node M.
742 void RemovePred(SUnit *M, SUnit *N);
744 typedef std::vector<int>::iterator iterator;
745 typedef std::vector<int>::const_iterator const_iterator;
746 iterator begin() { return Index2Node.begin(); }
747 const_iterator begin() const { return Index2Node.begin(); }
748 iterator end() { return Index2Node.end(); }
749 const_iterator end() const { return Index2Node.end(); }
751 typedef std::vector<int>::reverse_iterator reverse_iterator;
752 typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
753 reverse_iterator rbegin() { return Index2Node.rbegin(); }
754 const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
755 reverse_iterator rend() { return Index2Node.rend(); }
756 const_reverse_iterator rend() const { return Index2Node.rend(); }