1 //===------- llvm/CodeGen/ScheduleDAG.h - Common Base Class------*- C++ -*-===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the ScheduleDAG class, which is used as the common
11 // base class for instruction schedulers. This encapsulates the scheduling DAG,
12 // which is shared between SelectionDAG and MachineInstr scheduling.
14 //===----------------------------------------------------------------------===//
16 #ifndef LLVM_CODEGEN_SCHEDULEDAG_H
17 #define LLVM_CODEGEN_SCHEDULEDAG_H
19 #include "llvm/ADT/BitVector.h"
20 #include "llvm/ADT/GraphTraits.h"
21 #include "llvm/ADT/PointerIntPair.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/CodeGen/MachineInstr.h"
24 #include "llvm/Target/TargetLowering.h"
29 class MachineConstantPool;
30 class MachineFunction;
31 class MachineRegisterInfo;
33 struct MCSchedClassDesc;
34 class TargetRegisterInfo;
37 class TargetInstrInfo;
40 class TargetRegisterClass;
41 template<class Graph> class GraphWriter;
43 /// SDep - Scheduling dependency. This represents one direction of an
44 /// edge in the scheduling DAG.
// NOTE(review): this listing is elided — the `class SDep {` header, the enum
// declarations, several member/brace lines, and method bodies are missing
// between the stale line-number prefixes. Only comments are edited here.
47 /// Kind - These are the different kinds of scheduling dependencies.
49 Data, ///< Regular data dependence (aka true-dependence).
50 Anti, ///< A register anti-dependence (aka WAR).
51 Output, ///< A register output-dependence (aka WAW).
52 Order ///< Any other ordering dependency.
55 // Strong dependencies must be respected by the scheduler. Artificial
56 // dependencies may be removed only if they are redundant with another
59 // Weak dependencies may be violated by the scheduling strategy, but only if
60 // the strategy can prove it is correct to do so.
62 // Strong OrderKinds must occur before "Weak".
63 // Weak OrderKinds must occur after "Weak".
65 Barrier, ///< An unknown scheduling barrier.
66 MayAliasMem, ///< Nonvolatile load/Store instructions that may alias.
67 MustAliasMem, ///< Nonvolatile load/Store instructions that must alias.
68 Artificial, ///< Arbitrary strong DAG edge (no real dependence).
69 Weak, ///< Arbitrary weak DAG edge.
70 Cluster ///< Weak DAG edge linking a chain of clustered instrs.
74 /// Dep - A pointer to the depending/depended-on SUnit, and an enum
75 /// indicating the kind of the dependency.
76 PointerIntPair<SUnit *, 2, Kind> Dep;
78 /// Contents - A union discriminated by the dependence kind.
80 /// Reg - For Data, Anti, and Output dependencies, the associated
81 /// register. For Data dependencies that don't currently have a register
82 /// assigned, this is set to zero.
85 /// Order - Additional information about Order dependencies.
86 unsigned OrdKind; // enum OrderKind
89 /// Latency - The time associated with this edge. Often this is just
90 /// the value of the Latency field of the predecessor, however advanced
91 /// models may provide additional information about specific edges.
95 /// SDep - Construct a null SDep. This is only for use by container
96 /// classes which require default constructors. SUnits may not
97 /// have null SDep edges.
98 SDep() : Dep(nullptr, Data) {}
100 /// SDep - Construct an SDep with the specified values.
101 SDep(SUnit *S, Kind kind, unsigned Reg)
102 : Dep(S, kind), Contents() {
105 llvm_unreachable("Reg given for non-register dependence!");
109 "SDep::Anti and SDep::Output must use a non-zero Reg!");
// Order-kind constructor: latency of an ordering edge defaults to 0.
119 SDep(SUnit *S, OrderKind kind)
120 : Dep(S, Order), Contents(), Latency(0) {
121 Contents.OrdKind = kind;
124 /// Return true if the specified SDep is equivalent except for latency.
125 bool overlaps(const SDep &Other) const {
126 if (Dep != Other.Dep) return false;
127 switch (Dep.getInt()) {
131 return Contents.Reg == Other.Contents.Reg;
133 return Contents.OrdKind == Other.Contents.OrdKind;
135 llvm_unreachable("Invalid dependency kind!");
// Full equality: same endpoint, kind, discriminated contents, and latency.
138 bool operator==(const SDep &Other) const {
139 return overlaps(Other) && Latency == Other.Latency;
142 bool operator!=(const SDep &Other) const {
143 return !operator==(Other);
146 /// getLatency - Return the latency value for this edge, which roughly
147 /// means the minimum number of cycles that must elapse between the
148 /// predecessor and the successor, given that they have this edge
/// between them.
150 unsigned getLatency() const {
154 /// setLatency - Set the latency for this edge.
155 void setLatency(unsigned Lat) {
159 /// getSUnit - Return the SUnit to which this edge points.
160 SUnit *getSUnit() const {
161 return Dep.getPointer();
164 /// setSUnit - Assign the SUnit to which this edge points.
165 void setSUnit(SUnit *SU) {
169 /// getKind - Return an enum value representing the kind of the dependence.
170 Kind getKind() const {
174 /// isCtrl - Shorthand for getKind() != SDep::Data.
175 bool isCtrl() const {
176 return getKind() != Data;
179 /// isNormalMemory - Test if this is an Order dependence between two
180 /// memory accesses where both sides of the dependence access memory
181 /// in non-volatile and fully modeled ways.
182 bool isNormalMemory() const {
183 return getKind() == Order && (Contents.OrdKind == MayAliasMem
184 || Contents.OrdKind == MustAliasMem);
187 /// isBarrier - Test if this is an Order dependence that is marked
/// as "barrier".
189 bool isBarrier() const {
190 return getKind() == Order && Contents.OrdKind == Barrier;
193 /// isNormalMemoryOrBarrier - Test if this could be any kind of memory
/// dependence.
195 bool isNormalMemoryOrBarrier() const {
196 return (isNormalMemory() || isBarrier());
199 /// isMustAlias - Test if this is an Order dependence that is marked
200 /// as "must alias", meaning that the SUnits at either end of the edge
201 /// have a memory dependence on a known memory location.
202 bool isMustAlias() const {
203 return getKind() == Order && Contents.OrdKind == MustAliasMem;
206 /// isWeak - Test if this is a weak dependence. Weak dependencies are
207 /// considered DAG edges for height computation and other heuristics, but do
208 /// not force ordering. Breaking a weak edge may require the scheduler to
209 /// compensate, for example by inserting a copy.
210 bool isWeak() const {
211 return getKind() == Order && Contents.OrdKind >= Weak;
214 /// isArtificial - Test if this is an Order dependence that is marked
215 /// as "artificial", meaning it isn't necessary for correctness.
216 bool isArtificial() const {
217 return getKind() == Order && Contents.OrdKind == Artificial;
220 /// isCluster - Test if this is an Order dependence that is marked
221 /// as "cluster", meaning it is artificial and wants to be adjacent.
222 bool isCluster() const {
223 return getKind() == Order && Contents.OrdKind == Cluster;
226 /// isAssignedRegDep - Test if this is a Data dependence that is
227 /// associated with a register.
228 bool isAssignedRegDep() const {
229 return getKind() == Data && Contents.Reg != 0;
232 /// getReg - Return the register associated with this edge. This is
233 /// only valid on Data, Anti, and Output edges. On Data edges, this
234 /// value may be zero, meaning there is no associated register.
235 unsigned getReg() const {
236 assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
237 "getReg called on non-register dependence edge!");
241 /// setReg - Assign the associated register for this edge. This is
242 /// only valid on Data, Anti, and Output edges. On Anti and Output
243 /// edges, this value must not be zero. On Data edges, the value may
244 /// be zero, which would mean that no specific register is associated
/// with this edge.
246 void setReg(unsigned Reg) {
247 assert((getKind() == Data || getKind() == Anti || getKind() == Output) &&
248 "setReg called on non-register dependence edge!");
249 assert((getKind() != Anti || Reg != 0) &&
250 "SDep::Anti edge cannot use the zero register!");
251 assert((getKind() != Output || Reg != 0) &&
252 "SDep::Output edge cannot use the zero register!");
258 struct isPodLike<SDep> { static const bool value = true; };
260 /// SUnit - Scheduling unit. This is a node in the scheduling DAG.
// NOTE(review): elided listing — the `class SUnit {` header, several
// member/brace lines, and some method bodies are missing between the stale
// line-number prefixes. Only comments are edited here.
// BoundaryID is a sentinel NodeNum marking a region-boundary placeholder node.
263 enum : unsigned { BoundaryID = ~0u };
265 SDNode *Node; // Representative node.
266 MachineInstr *Instr; // Alternatively, a MachineInstr.
268 SUnit *OrigNode; // If not this, the node from which
269 // this node was cloned.
270 // (SD scheduling only)
272 const MCSchedClassDesc *SchedClass; // NULL or resolved SchedClass.
274 // Preds/Succs - The SUnits before/after us in the graph.
275 SmallVector<SDep, 4> Preds; // All sunit predecessors.
276 SmallVector<SDep, 4> Succs; // All sunit successors.
278 typedef SmallVectorImpl<SDep>::iterator pred_iterator;
279 typedef SmallVectorImpl<SDep>::iterator succ_iterator;
280 typedef SmallVectorImpl<SDep>::const_iterator const_pred_iterator;
281 typedef SmallVectorImpl<SDep>::const_iterator const_succ_iterator;
283 unsigned NodeNum; // Entry # of node in the node vector.
284 unsigned NodeQueueId; // Queue id of node.
285 unsigned NumPreds; // # of SDep::Data preds.
286 unsigned NumSuccs; // # of SDep::Data succs.
287 unsigned NumPredsLeft; // # of preds not scheduled.
288 unsigned NumSuccsLeft; // # of succs not scheduled.
289 unsigned WeakPredsLeft; // # of weak preds not scheduled.
290 unsigned WeakSuccsLeft; // # of weak succs not scheduled.
291 unsigned short NumRegDefsLeft; // # of reg defs with no scheduled use.
292 unsigned short Latency; // Node latency.
293 bool isVRegCycle : 1; // May use and def the same vreg.
294 bool isCall : 1; // Is a function call.
295 bool isCallOp : 1; // Is a function call operand.
296 bool isTwoAddress : 1; // Is a two-address instruction.
297 bool isCommutable : 1; // Is a commutable instruction.
298 bool hasPhysRegUses : 1; // Has physreg uses.
299 bool hasPhysRegDefs : 1; // Has physreg defs that are being used.
300 bool hasPhysRegClobbers : 1; // Has any physreg defs, used or not.
301 bool isPending : 1; // True once pending.
302 bool isAvailable : 1; // True once available.
303 bool isScheduled : 1; // True once scheduled.
304 bool isScheduleHigh : 1; // True if preferable to schedule high.
305 bool isScheduleLow : 1; // True if preferable to schedule low.
306 bool isCloned : 1; // True if this node has been cloned.
307 bool isUnbuffered : 1; // Uses an unbuffered resource.
308 bool hasReservedResource : 1; // Uses a reserved resource.
309 Sched::Preference SchedulingPref; // Scheduling preference.
312 bool isDepthCurrent : 1; // True if Depth is current.
313 bool isHeightCurrent : 1; // True if Height is current.
314 unsigned Depth; // Node depth.
315 unsigned Height; // Node height.
317 unsigned TopReadyCycle; // Cycle relative to start when node is ready.
318 unsigned BotReadyCycle; // Cycle relative to end when node is ready.
320 const TargetRegisterClass *CopyDstRC; // Is a special copy node if not null.
// Presumably the source register class of a special copy node — TODO confirm.
321 const TargetRegisterClass *CopySrcRC;
323 /// SUnit - Construct an SUnit for pre-regalloc scheduling to represent
324 /// an SDNode and any nodes flagged to it.
325 SUnit(SDNode *node, unsigned nodenum)
326 : Node(node), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
327 NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
328 NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
329 NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
330 isCallOp(false), isTwoAddress(false), isCommutable(false),
331 hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
332 isPending(false), isAvailable(false), isScheduled(false),
333 isScheduleHigh(false), isScheduleLow(false), isCloned(false),
334 isUnbuffered(false), hasReservedResource(false),
335 SchedulingPref(Sched::None), isDepthCurrent(false),
336 isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
337 BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
339 /// SUnit - Construct an SUnit for post-regalloc scheduling to represent
341 SUnit(MachineInstr *instr, unsigned nodenum)
342 : Node(nullptr), Instr(instr), OrigNode(nullptr), SchedClass(nullptr),
343 NodeNum(nodenum), NodeQueueId(0), NumPreds(0), NumSuccs(0),
344 NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
345 NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
346 isCallOp(false), isTwoAddress(false), isCommutable(false),
347 hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
348 isPending(false), isAvailable(false), isScheduled(false),
349 isScheduleHigh(false), isScheduleLow(false), isCloned(false),
350 isUnbuffered(false), hasReservedResource(false),
351 SchedulingPref(Sched::None), isDepthCurrent(false),
352 isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
353 BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
355 /// SUnit - Construct a placeholder SUnit.
// NOTE(review): the `SUnit()` signature line is elided; NodeNum is set to
// BoundaryID here, making this a boundary node (see isBoundaryNode()).
357 : Node(nullptr), Instr(nullptr), OrigNode(nullptr), SchedClass(nullptr),
358 NodeNum(BoundaryID), NodeQueueId(0), NumPreds(0), NumSuccs(0),
359 NumPredsLeft(0), NumSuccsLeft(0), WeakPredsLeft(0), WeakSuccsLeft(0),
360 NumRegDefsLeft(0), Latency(0), isVRegCycle(false), isCall(false),
361 isCallOp(false), isTwoAddress(false), isCommutable(false),
362 hasPhysRegUses(false), hasPhysRegDefs(false), hasPhysRegClobbers(false),
363 isPending(false), isAvailable(false), isScheduled(false),
364 isScheduleHigh(false), isScheduleLow(false), isCloned(false),
365 isUnbuffered(false), hasReservedResource(false),
366 SchedulingPref(Sched::None), isDepthCurrent(false),
367 isHeightCurrent(false), Depth(0), Height(0), TopReadyCycle(0),
368 BotReadyCycle(0), CopyDstRC(nullptr), CopySrcRC(nullptr) {}
370 /// \brief Boundary nodes are placeholders for the boundary of the
371 /// scheduling region.
373 /// BoundaryNodes can have DAG edges, including Data edges, but they do not
374 /// correspond to schedulable entities (e.g. instructions) and do not have a
375 /// valid ID. Consequently, always check for boundary nodes before accessing
376 /// an associative data structure keyed on node ID.
377 bool isBoundaryNode() const { return NodeNum == BoundaryID; }
379 /// setNode - Assign the representative SDNode for this SUnit.
380 /// This may be used during pre-regalloc scheduling.
381 void setNode(SDNode *N) {
382 assert(!Instr && "Setting SDNode of SUnit with MachineInstr!");
386 /// getNode - Return the representative SDNode for this SUnit.
387 /// This may be used during pre-regalloc scheduling.
388 SDNode *getNode() const {
389 assert(!Instr && "Reading SDNode of SUnit with MachineInstr!");
393 /// isInstr - Return true if this SUnit refers to a machine instruction as
394 /// opposed to an SDNode.
395 bool isInstr() const { return Instr; }
397 /// setInstr - Assign the instruction for the SUnit.
398 /// This may be used during post-regalloc scheduling.
399 void setInstr(MachineInstr *MI) {
400 assert(!Node && "Setting MachineInstr of SUnit with SDNode!");
404 /// getInstr - Return the representative MachineInstr for this SUnit.
405 /// This may be used during post-regalloc scheduling.
406 MachineInstr *getInstr() const {
407 assert(!Node && "Reading MachineInstr of SUnit with SDNode!");
411 /// addPred - This adds the specified edge as a pred of the current node if
412 /// not already. It also adds the current node as a successor of the
414 bool addPred(const SDep &D, bool Required = true);
416 /// removePred - This removes the specified edge as a pred of the current
417 /// node if it exists. It also removes the current node as a successor of
418 /// the specified node.
419 void removePred(const SDep &D);
421 /// getDepth - Return the depth of this node, which is the length of the
422 /// maximum path up to any node which has no predecessors.
423 unsigned getDepth() const {
425 const_cast<SUnit *>(this)->ComputeDepth();
429 /// getHeight - Return the height of this node, which is the length of the
430 /// maximum path down to any node which has no successors.
431 unsigned getHeight() const {
432 if (!isHeightCurrent)
433 const_cast<SUnit *>(this)->ComputeHeight();
437 /// setDepthToAtLeast - If NewDepth is greater than this node's
438 /// depth value, set it to be the new depth value. This also
439 /// recursively marks successor nodes dirty.
440 void setDepthToAtLeast(unsigned NewDepth);
442 /// setHeightToAtLeast - If NewHeight is greater than this node's
443 /// height value, set it to be the new height value. This also
444 /// recursively marks predecessor nodes dirty.
445 void setHeightToAtLeast(unsigned NewHeight);
447 /// setDepthDirty - Set a flag in this node to indicate that its
448 /// stored Depth value will require recomputation the next time
449 /// getDepth() is called.
450 void setDepthDirty();
452 /// setHeightDirty - Set a flag in this node to indicate that its
453 /// stored Height value will require recomputation the next time
454 /// getHeight() is called.
455 void setHeightDirty();
457 /// isPred - Test if node N is a predecessor of this node.
458 bool isPred(SUnit *N) {
459 for (unsigned i = 0, e = (unsigned)Preds.size(); i != e; ++i)
460 if (Preds[i].getSUnit() == N)
465 /// isSucc - Test if node N is a successor of this node.
466 bool isSucc(SUnit *N) {
467 for (unsigned i = 0, e = (unsigned)Succs.size(); i != e; ++i)
468 if (Succs[i].getSUnit() == N)
// True when every (strong) predecessor has been scheduled.
473 bool isTopReady() const {
474 return NumPredsLeft == 0;
// True when every (strong) successor has been scheduled.
476 bool isBottomReady() const {
477 return NumSuccsLeft == 0;
480 /// \brief Order this node's predecessor edges such that the critical path
481 /// edge occurs first.
482 void biasCriticalPath();
484 void dump(const ScheduleDAG *G) const;
485 void dumpAll(const ScheduleDAG *G) const;
486 void print(raw_ostream &O, const ScheduleDAG *G) const;
490 void ComputeHeight();
493 //===--------------------------------------------------------------------===//
494 /// SchedulingPriorityQueue - This interface is used to plug different
495 /// priorities computation algorithms into the list scheduler. It implements
496 /// the interface of a standard priority queue, where nodes are inserted in
497 /// arbitrary order and returned in priority order. The computation of the
498 /// priority and the representation of the queue are totally up to the
499 /// implementation to decide.
// NOTE(review): elided listing — access specifiers, member declarations
// (CurCycle, HasReadyFilter), and some closing braces are on missing lines.
501 class SchedulingPriorityQueue {
// Out-of-line anchor pins the vtable to one translation unit.
502 virtual void anchor();
506 SchedulingPriorityQueue(bool rf = false):
507 CurCycle(0), HasReadyFilter(rf) {}
508 virtual ~SchedulingPriorityQueue() {}
510 virtual bool isBottomUp() const = 0;
512 virtual void initNodes(std::vector<SUnit> &SUnits) = 0;
513 virtual void addNode(const SUnit *SU) = 0;
514 virtual void updateNode(const SUnit *SU) = 0;
515 virtual void releaseState() = 0;
517 virtual bool empty() const = 0;
519 bool hasReadyFilter() const { return HasReadyFilter; }
521 virtual bool tracksRegPressure() const { return false; }
// Default isReady() is only legal when no ready filter was requested;
// subclasses that pass rf=true must override it.
523 virtual bool isReady(SUnit *) const {
524 assert(!HasReadyFilter && "The ready filter must override isReady()");
527 virtual void push(SUnit *U) = 0;
529 void push_all(const std::vector<SUnit *> &Nodes) {
530 for (std::vector<SUnit *>::const_iterator I = Nodes.begin(),
531 E = Nodes.end(); I != E; ++I)
535 virtual SUnit *pop() = 0;
537 virtual void remove(SUnit *SU) = 0;
539 virtual void dump(ScheduleDAG *) const {}
541 /// scheduledNode - As each node is scheduled, this method is invoked. This
542 /// allows the priority function to adjust the priority of related
543 /// unscheduled nodes, for example.
545 virtual void scheduledNode(SUnit *) {}
547 virtual void unscheduledNode(SUnit *) {}
549 void setCurCycle(unsigned Cycle) {
553 unsigned getCurCycle() const {
// NOTE(review): the `class ScheduleDAG {` header and several interior lines
// are elided from this listing; only comments are edited here.
560 const TargetMachine &TM; // Target processor
561 const TargetInstrInfo *TII; // Target instruction information
562 const TargetRegisterInfo *TRI; // Target processor register info
563 MachineFunction &MF; // Machine function
564 MachineRegisterInfo &MRI; // Virtual/real register map
565 std::vector<SUnit> SUnits; // The scheduling units.
566 SUnit EntrySU; // Special node for the region entry.
567 SUnit ExitSU; // Special node for the region exit.
570 static const bool StressSched = false;
575 explicit ScheduleDAG(MachineFunction &mf);
577 virtual ~ScheduleDAG();
579 /// clearDAG - clear the DAG state (between regions).
582 /// getInstrDesc - Return the MCInstrDesc of this SUnit.
583 /// Return NULL for SDNodes without a machine opcode.
584 const MCInstrDesc *getInstrDesc(const SUnit *SU) const {
585 if (SU->isInstr()) return &SU->getInstr()->getDesc();
586 return getNodeDesc(SU->getNode());
589 /// viewGraph - Pop up a GraphViz/gv window with the ScheduleDAG rendered
592 virtual void viewGraph(const Twine &Name, const Twine &Title);
593 virtual void viewGraph();
595 virtual void dumpNode(const SUnit *SU) const = 0;
597 /// getGraphNodeLabel - Return a label for an SUnit node in a visualization
598 /// of the ScheduleDAG.
599 virtual std::string getGraphNodeLabel(const SUnit *SU) const = 0;
601 /// getDAGName - Return a label for the region of code covered by the DAG.
602 virtual std::string getDAGName() const = 0;
604 /// addCustomGraphFeatures - Add custom features for a visualization of
606 virtual void addCustomGraphFeatures(GraphWriter<ScheduleDAG*> &) const {}
609 /// VerifyScheduledDAG - Verify that all SUnits were scheduled and that
610 /// their state is consistent. Return the number of scheduled SUnits.
611 unsigned VerifyScheduledDAG(bool isBottomUp);
615 // Return the MCInstrDesc of this SDNode or NULL.
616 const MCInstrDesc *getNodeDesc(const SDNode *Node) const;
// Forward iterator over an SUnit's predecessor edges, yielding the SUnit at
// the other end of each SDep in Node->Preds.
// NOTE(review): std::iterator is deprecated since C++17 — when this header is
// modernized, replace the base class with explicit member typedefs.
619 class SUnitIterator : public std::iterator<std::forward_iterator_tag,
624 SUnitIterator(SUnit *N, unsigned Op) : Node(N), Operand(Op) {}
// Equality compares only the edge index; comparing iterators from different
// nodes is not meaningful.
626 bool operator==(const SUnitIterator& x) const {
627 return Operand == x.Operand;
629 bool operator!=(const SUnitIterator& x) const { return !operator==(x); }
631 pointer operator*() const {
632 return Node->Preds[Operand].getSUnit();
634 pointer operator->() const { return operator*(); }
636 SUnitIterator& operator++() { // Preincrement
640 SUnitIterator operator++(int) { // Postincrement
641 SUnitIterator tmp = *this; ++*this; return tmp;
644 static SUnitIterator begin(SUnit *N) { return SUnitIterator(N, 0); }
645 static SUnitIterator end (SUnit *N) {
646 return SUnitIterator(N, (unsigned)N->Preds.size());
649 unsigned getOperand() const { return Operand; }
650 const SUnit *getNode() const { return Node; }
651 /// isCtrlDep - Test if this is not an SDep::Data dependence.
652 bool isCtrlDep() const {
653 return getSDep().isCtrl();
655 bool isArtificialDep() const {
656 return getSDep().isArtificial();
// Direct access to the underlying predecessor edge.
658 const SDep &getSDep() const {
659 return Node->Preds[Operand];
// GraphTraits adapter: lets generic LLVM graph algorithms (and GraphWriter)
// walk the scheduling DAG by following predecessor edges from an SUnit.
663 template <> struct GraphTraits<SUnit*> {
664 typedef SUnit NodeType;
665 typedef SUnitIterator ChildIteratorType;
666 static inline NodeType *getEntryNode(SUnit *N) { return N; }
667 static inline ChildIteratorType child_begin(NodeType *N) {
668 return SUnitIterator::begin(N);
670 static inline ChildIteratorType child_end(NodeType *N) {
671 return SUnitIterator::end(N);
// GraphTraits adapter for a whole ScheduleDAG: node iteration is simply
// iteration over the DAG's SUnits vector; edges come from GraphTraits<SUnit*>.
675 template <> struct GraphTraits<ScheduleDAG*> : public GraphTraits<SUnit*> {
676 typedef std::vector<SUnit>::iterator nodes_iterator;
677 static nodes_iterator nodes_begin(ScheduleDAG *G) {
678 return G->SUnits.begin();
680 static nodes_iterator nodes_end(ScheduleDAG *G) {
681 return G->SUnits.end();
685 /// ScheduleDAGTopologicalSort is a class that computes a topological
686 /// ordering for SUnits and provides methods for dynamically updating
687 /// the ordering as new edges are added.
689 /// This allows a very fast implementation of IsReachable, for example.
691 class ScheduleDAGTopologicalSort {
692 /// SUnits - A reference to the ScheduleDAG's SUnits.
693 std::vector<SUnit> &SUnits;
696 /// Index2Node - Maps topological index to the node number.
697 std::vector<int> Index2Node;
698 /// Node2Index - Maps the node number to its topological index.
699 std::vector<int> Node2Index;
700 /// Visited - a set of nodes visited during a DFS traversal.
703 /// DFS - make a DFS traversal and mark all nodes affected by the
704 /// edge insertion. These nodes will later get new topological indexes
705 /// by means of the Shift method.
706 void DFS(const SUnit *SU, int UpperBound, bool& HasLoop);
708 /// Shift - reassign topological indexes for the nodes in the DAG
709 /// to preserve the topological ordering.
710 void Shift(BitVector& Visited, int LowerBound, int UpperBound);
712 /// Allocate - assign the topological index to the node n.
713 void Allocate(int n, int index);
716 ScheduleDAGTopologicalSort(std::vector<SUnit> &SUnits, SUnit *ExitSU);
718 /// InitDAGTopologicalSorting - create the initial topological
719 /// ordering from the DAG to be scheduled.
720 void InitDAGTopologicalSorting();
722 /// IsReachable - Checks if SU is reachable from TargetSU.
723 bool IsReachable(const SUnit *SU, const SUnit *TargetSU);
725 /// WillCreateCycle - Return true if addPred(TargetSU, SU) creates a cycle.
726 bool WillCreateCycle(SUnit *TargetSU, SUnit *SU);
728 /// AddPred - Updates the topological ordering to accommodate an edge
729 /// to be added from SUnit X to SUnit Y.
730 void AddPred(SUnit *Y, SUnit *X);
732 /// RemovePred - Updates the topological ordering to accommodate an
733 /// an edge to be removed from the specified node N from the predecessors
734 /// of the current node M.
735 void RemovePred(SUnit *M, SUnit *N);
737 typedef std::vector<int>::iterator iterator;
738 typedef std::vector<int>::const_iterator const_iterator;
739 iterator begin() { return Index2Node.begin(); }
740 const_iterator begin() const { return Index2Node.begin(); }
741 iterator end() { return Index2Node.end(); }
742 const_iterator end() const { return Index2Node.end(); }
744 typedef std::vector<int>::reverse_iterator reverse_iterator;
745 typedef std::vector<int>::const_reverse_iterator const_reverse_iterator;
746 reverse_iterator rbegin() { return Index2Node.rbegin(); }
747 const_reverse_iterator rbegin() const { return Index2Node.rbegin(); }
748 reverse_iterator rend() { return Index2Node.rend(); }
749 const_reverse_iterator rend() const { return Index2Node.rend(); }