//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  tdrListrDAGScheduler("list-tdrr",
                       "Top-down register reduction list scheduling",
                       createTDRRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);
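
// Usage sketch (hedged): each registration above adds a value to llc's common
// -pre-RA-sched option, so, assuming a standard llc build and a hypothetical
// input file test.ll, these schedulers can be selected with e.g.
//
//   llc -pre-RA-sched=list-burr test.ll
//   llc -pre-RA-sched=source -debug-only=pre-RA-sched test.ll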

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));

#ifndef NDEBUG
namespace {
  // For sched=list-ilp, count the number of times each factor comes into play.
  enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
         FactStatic, FactOther, NumFactors };
}
static const char *FactorName[NumFactors] =
{"PressureDiff", "RegUses", "Stall", "Height", "Depth", "Static", "Other"};
static int FactorCount[NumFactors];
#endif //!NDEBUG
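
// Illustrative sketch only (the real updates live in the comparison routines
// later in this file and may differ in detail): when a heuristic decides an
// ordering, the matching counter is bumped, e.g. ++FactorCount[FactStall];
// Schedule() then prints one total per FactorName entry.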

//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
namespace {
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// isBottomUp - This is true if the scheduling problem is bottom-up, false if
  /// it is top-down.
  bool isBottomUp;

  /// NeedLatency - True if the scheduler will make use of latency information.
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions
  /// that are "live". These nodes must be scheduled before any other nodes that
  /// modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf), isBottomUp(availqueue->isBottomUp()),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {
    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
           AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleaseSucc(SUnit *SU, const SDep *SuccEdge);
  void ReleaseSuccessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  void ScheduleNodeTopDown(SUnit*);
  void ListScheduleTopDown();

public:
  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool ForceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::untyped) {
    const SDNode *Node = RegDefPos.GetNode();
    unsigned Opcode = Node->getMachineOpcode();

    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const TargetInstrDesc Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = Desc.getRegClass(Idx, TRI);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");
#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    FactorCount[i] = 0;
  }
#endif //!NDEBUG

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  LiveRegDefs.resize(TRI->getNumRegs(), NULL);
  LiveRegGens.resize(TRI->getNumRegs(), NULL);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop Top-Down or Bottom-Up as appropriate.
  if (isBottomUp)
    ListScheduleBottomUp();
  else
    ListScheduleTopDown();

#ifndef NDEBUG
  for (int i = 0; i < NumFactors; ++i) {
    DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
  }
#endif //!NDEBUG
  AvailableQueue->releaseState();
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!ForceUnitLatencies()) {
    // Update the predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue, avoid
    // adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle =
      isBottomUp ? PendingQueue[i]->getHeight() : PendingQueue[i]->getDepth();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      if (isBottomUp)
        HazardRec->RecedeCycle();
      else
        HazardRec->AdvanceCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = isBottomUp ? SU->getHeight() : SU->getDepth();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this node.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (isBottomUp && SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, isBottomUp ? -Stalls : Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (isBottomUp && SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);

  if (!isBottomUp && SU->isCall) {
    HazardRec->Reset();
  }
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << " Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->ScheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessor states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      // Check liveness before overwriting the entry; testing it after the
      // assignment would never fire.
      if (!LiveRegDefs[I->getReg()]) {
        ++NumLiveRegs;
      }
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else
    AvailableQueue->push(SU);

  AvailableQueue->UnscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to the cycle of BtSU in order to
/// schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location and producing the same type of value
    // but it has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != TID.getNumOperands(); ++i) {
      if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (TID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
             /*Reg=*/0, /*isNormalMemory=*/false,
             /*isMustAlias=*/false, /*isArtificial=*/true);
      AddPred(SuccSU, D);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const TargetInstrDesc &TID = TII->get(N->getMachineOpcode());
  assert(TID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = TID.getNumDefs();
  for (const unsigned *ImpDef = TID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - Update RegAdded/LRegs if the specified register def of
/// the specified SUnit clobbers any "live" registers.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {

    // Check if Ref is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps; // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    const TargetInstrDesc &TID = TII->get(Node->getMachineOpcode());
    if (!TID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = TID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true; // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false, /*isArtificial=*/true));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If cross copy register class is the same as RC, then it must be possible
    // to copy the value directly. Do not try to duplicate the def.
    // If cross copy register class is not the same as RC, then it's possible to
    // copy the value but it requires cross register class copies and it is
    // expensive.
    // If cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false,
                          /*isArtificial=*/true));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                         /*Reg=*/0, /*isNormalMemory=*/false,
                         /*isMustAlias=*/false,
                         /*isArtificial=*/true));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // account.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleaseSucc(SUnit *SU, const SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU) {
    SuccSU->isAvailable = true;
    AvailableQueue->push(SuccSU);
  }
}

void ScheduleDAGRRList::ReleaseSuccessors(SUnit *SU) {
  // Top down: release successors
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    assert(!I->isAssignedRegDep() &&
           "The list-tdrr scheduler doesn't yet support physreg dependencies!");

    ReleaseSucc(SU, &*I);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeTopDown(SUnit *SU) {
  DEBUG(dbgs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);
  Sequence.push_back(SU);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue->ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void ScheduleDAGRRList::ListScheduleTopDown() {
  AvailableQueue->setCurCycle(CurCycle);

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue->push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    SUnit *CurSU = AvailableQueue->pop();

    if (CurSU)
      ScheduleNodeTopDown(CurSU);
    ++CurCycle;
    AvailableQueue->setCurCycle(CurCycle);
  }

#ifndef NDEBUG
  VerifySchedule(isBottomUp);
#endif
}

//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi-Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG

/// bu_ls_rr_sort - Priority function for bottom up register pressure
// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// td_ls_rr_sort - Priority function for top down register pressure reduction
// scheduler.
struct td_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = false,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  td_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  td_ls_rr_sort(const td_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(const SUnit* left, const SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  ///
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void ScheduledNode(SUnit *SU);

  void UnscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};

template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  (void)DAG;
  return popFromQueueImpl(Q, Picker);
}

template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      if (isBottomUp())
        dbgs() << "Height " << SU->getHeight() << ": ";
      else
        dbgs() << "Depth " << SU->getDepth() << ": ";
      SU->dump(DAG);
    }
  }
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<td_ls_rr_sort>
TDRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace
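
// Construction sketch (hedged; the create*DAGScheduler factory functions that
// actually wire these together appear later in this file and may differ in
// detail). A queue is paired with the list scheduler roughly like so:
//
//   BURegReductionPriorityQueue *PQ =
//     new BURegReductionPriorityQueue(MF, /*tracksrp=*/false, TII, TRI, TLI);
//   ScheduleDAGRRList *SD =
//     new ScheduleDAGRRList(MF, /*needlatency=*/false, PQ, OptLevel);
//   PQ->setScheduleDAG(SD);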

//===----------------------------------------------------------------------===//
//           Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
  bool LSchedLow = left->isScheduleLow;
  bool RSchedLow = right->isScheduleLow;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
  return 0;
}

/// CalcNodeSethiUllmanNumber - Compute Sethi-Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue; // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
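
// Worked example (illustrative only, not from the source): a leaf gets
// number 1. A node with a single data pred of number 1 also gets 1 (the max,
// with no tie). A node whose two data preds both have number 2 takes the max
// (2) plus Extra = 1 for the tie, giving 3; this mirrors the classic
// Sethi-Ullman rule that two equally demanding subtrees cost one extra
// register.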

/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so it will be
    // scheduled right before its predecessors, and so it doesn't lengthen
    // their live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return 0;
#if 1
  return SethiUllmanNumbers[SU->NodeNum];
#else
  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
  if (SU->isCallOp) {
    // FIXME: This assumes all of the defs are used as call operands.
    int NP = (int)Priority - SU->getNode()->getNumValues();
    return (NP > 0) ? NP : 0;
  }
  return Priority;
#endif
}

//===----------------------------------------------------------------------===//
//                     Register Pressure Tracking
//===----------------------------------------------------------------------===//

void RegReductionPQBase::dumpRegPressure() const {
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
}
1830 bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1834 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1838 SUnit *PredSU = I->getSUnit();
1839 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1840 // to cover the number of registers defined (they are all live).
1841 if (PredSU->NumRegDefsLeft == 0) {
1842 continue;
1843 }
1844 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1845 RegDefPos.IsValid(); RegDefPos.Advance()) {
1846 unsigned RCId, Cost;
1847 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
1849 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1850 return true;
1851 }
1852 }
1853 return false;
1854 }
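// Editor's note (hypothetical numbers for illustration): with RegLimit 6 and
// RegPressure 5 for some class, a predecessor def of Cost 1 satisfies
// 5 + 1 >= 6, so HighRegPressure reports true and the hybrid heuristic below
// switches to pure register-pressure reduction for this node.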
1856 bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1857 const SDNode *N = SU->getNode();
1859 if (!N->isMachineOpcode() || !SU->NumSuccs)
1860 return false;
1862 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1863 for (unsigned i = 0; i != NumDefs; ++i) {
1864 EVT VT = N->getValueType(i);
1865 if (!N->hasAnyUseOfValue(i))
1866 continue;
1867 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1868 if (RegPressure[RCId] >= RegLimit[RCId])
1869 return true;
1870 }
1871 return false;
1872 }
1874 // Compute the register pressure contribution by this instruction by counting
1875 // up for uses that are not live and down for defs. Only count register classes
1876 // that are already under high pressure. As a side effect, compute the number of
1877 // uses of registers that are already live.
1879 // FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1880 // so could probably be factored.
1881 int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1882 LiveUses = 0;
1883 int PDiff = 0;
1884 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1885 I != E; ++I) {
1886 if (I->isCtrl())
1887 continue;
1888 SUnit *PredSU = I->getSUnit();
1889 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1890 // to cover the number of registers defined (they are all live).
1891 if (PredSU->NumRegDefsLeft == 0) {
1892 if (PredSU->getNode()->isMachineOpcode())
1893 ++LiveUses;
1894 continue;
1895 }
1896 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1897 RegDefPos.IsValid(); RegDefPos.Advance()) {
1898 EVT VT = RegDefPos.GetValue();
1899 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1900 if (RegPressure[RCId] >= RegLimit[RCId])
1901 ++PDiff;
1902 }
1903 }
1904 const SDNode *N = SU->getNode();
1906 if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
1907 return PDiff;
1909 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1910 for (unsigned i = 0; i != NumDefs; ++i) {
1911 EVT VT = N->getValueType(i);
1912 if (!N->hasAnyUseOfValue(i))
1913 continue;
1914 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1915 if (RegPressure[RCId] >= RegLimit[RCId])
1916 --PDiff;
1917 }
1918 return PDiff;
1919 }
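// Editor's note (hypothetical numbers for illustration): if SU reads two
// values whose defining nodes still have register defs outstanding in a
// class already at its limit, and SU itself defines one used value in that
// class, the uses count +2 and the def counts -1, so RegPressureDiff returns
// +1: scheduling SU would leave one more register live in a class that is
// already under pressure.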
1921 void RegReductionPQBase::ScheduledNode(SUnit *SU) {
1922 if (!TracksRegPressure)
1923 return;
1925 if (!SU->getNode())
1926 return;
1928 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1929 I != E; ++I) {
1930 if (I->isCtrl())
1931 continue;
1932 SUnit *PredSU = I->getSUnit();
1933 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1934 // to cover the number of registers defined (they are all live).
1935 if (PredSU->NumRegDefsLeft == 0) {
1936 continue;
1937 }
1938 // FIXME: The ScheduleDAG currently loses information about which of a
1939 // node's values is consumed by each dependence. Consequently, if the node
1940 // defines multiple register classes, we don't know which to pressurize
1941 // here. Instead the following loop consumes the register defs in an
1942 // arbitrary order. At least it handles the common case of clustered loads
1943 // to the same class. For precise liveness, each SDep needs to indicate the
1944 // result number. But that tightly couples the ScheduleDAG with the
1945 // SelectionDAG making updates tricky. A simpler hack would be to attach a
1946 // value type or register class to SDep.
1947 //
1948 // The most important aspect of register tracking is balancing the increase
1949 // here with the reduction further below. Note that this SU may use multiple
1950 // defs in PredSU. They can't be determined here, but we've already
1951 // compensated by reducing NumRegDefsLeft in PredSU during
1952 // ScheduleDAGSDNodes::AddSchedEdges.
1953 --PredSU->NumRegDefsLeft;
1954 unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
1955 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1956 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
1957 if (SkipRegDefs)
1958 continue;
1960 unsigned RCId, Cost;
1961 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
1962 RegPressure[RCId] += Cost;
1963 break;
1964 }
1965 }
1967 // We should have this assert, but there may be dead SDNodes that never
1968 // materialize as SUnits, so they don't appear to generate liveness.
1969 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
1970 int SkipRegDefs = (int)SU->NumRegDefsLeft;
1971 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
1972 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
1973 if (SkipRegDefs > 0)
1974 continue;
1975 unsigned RCId, Cost;
1976 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
1977 if (RegPressure[RCId] < Cost) {
1978 // Register pressure tracking is imprecise. This can happen. But we try
1979 // hard not to let it happen because it likely results in poor scheduling.
1980 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
1981 RegPressure[RCId] = 0;
1982 }
1983 else {
1984 RegPressure[RCId] -= Cost;
1985 }
1986 }
1988 dumpRegPressure();
1989 }
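// Editor's note on the bookkeeping above: if PredSU defines two registers
// and SU is its first scheduled use, NumRegDefsLeft drops from 2 to 1 and
// exactly one def's cost is added to RegPressure; the remaining def is
// charged when a later use is scheduled, keeping the increase here balanced
// against the reduction applied for SU's own defs.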
1990 void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
1991 if (!TracksRegPressure)
1992 return;
1994 const SDNode *N = SU->getNode();
1995 if (!N) return;
1997 if (!N->isMachineOpcode()) {
1998 if (N->getOpcode() != ISD::CopyToReg)
1999 return;
2000 } else {
2001 unsigned Opc = N->getMachineOpcode();
2002 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2003 Opc == TargetOpcode::INSERT_SUBREG ||
2004 Opc == TargetOpcode::SUBREG_TO_REG ||
2005 Opc == TargetOpcode::REG_SEQUENCE ||
2006 Opc == TargetOpcode::IMPLICIT_DEF)
2007 return;
2008 }
2010 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2011 I != E; ++I) {
2012 if (I->isCtrl())
2013 continue;
2014 SUnit *PredSU = I->getSUnit();
2015 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2016 // counts data deps.
2017 if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2018 continue;
2019 const SDNode *PN = PredSU->getNode();
2020 if (!PN->isMachineOpcode()) {
2021 if (PN->getOpcode() == ISD::CopyFromReg) {
2022 EVT VT = PN->getValueType(0);
2023 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2024 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2025 }
2026 continue;
2027 }
2028 unsigned POpc = PN->getMachineOpcode();
2029 if (POpc == TargetOpcode::IMPLICIT_DEF)
2030 continue;
2031 if (POpc == TargetOpcode::EXTRACT_SUBREG) {
2032 EVT VT = PN->getOperand(0).getValueType();
2033 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2034 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2035 continue;
2036 } else if (POpc == TargetOpcode::INSERT_SUBREG ||
2037 POpc == TargetOpcode::SUBREG_TO_REG) {
2038 EVT VT = PN->getValueType(0);
2039 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2040 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2041 continue;
2042 }
2043 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2044 for (unsigned i = 0; i != NumDefs; ++i) {
2045 EVT VT = PN->getValueType(i);
2046 if (!PN->hasAnyUseOfValue(i))
2047 continue;
2048 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2049 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2050 // Register pressure tracking is imprecise. This can happen.
2051 RegPressure[RCId] = 0;
2052 else
2053 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2054 }
2055 }
2057 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2058 // may transfer data dependencies to CopyToReg.
2059 if (SU->NumSuccs && N->isMachineOpcode()) {
2060 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2061 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2062 EVT VT = N->getValueType(i);
2063 if (VT == MVT::Glue || VT == MVT::Other)
2064 continue;
2065 if (!N->hasAnyUseOfValue(i))
2066 continue;
2067 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2068 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2069 }
2070 }
2072 dumpRegPressure();
2073 }
2075 //===----------------------------------------------------------------------===//
2076 // Dynamic Node Priority for Register Pressure Reduction
2077 //===----------------------------------------------------------------------===//
2079 /// closestSucc - Returns the scheduled cycle of the successor which is
2080 /// closest to the current cycle.
2081 static unsigned closestSucc(const SUnit *SU) {
2082 unsigned MaxHeight = 0;
2083 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2084 I != E; ++I) {
2085 if (I->isCtrl()) continue; // ignore chain succs
2086 unsigned Height = I->getSUnit()->getHeight();
2087 // If there are a bunch of CopyToRegs stacked up, they should be considered
2088 // to be at the same position.
2089 if (I->getSUnit()->getNode() &&
2090 I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2091 Height = closestSucc(I->getSUnit())+1;
2092 if (Height > MaxHeight)
2093 MaxHeight = Height;
2094 }
2095 return MaxHeight;
2096 }
2098 /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
2099 /// for scratch registers, i.e. number of data dependencies.
2100 static unsigned calcMaxScratches(const SUnit *SU) {
2101 unsigned Scratches = 0;
2102 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2103 I != E; ++I) {
2104 if (I->isCtrl()) continue; // ignore chain preds
2105 Scratches++;
2106 }
2107 return Scratches;
2108 }
2110 /// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2111 /// CopyFromReg from a virtual register.
2112 static bool hasOnlyLiveInOpers(const SUnit *SU) {
2113 bool RetVal = false;
2114 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2115 I != E; ++I) {
2116 if (I->isCtrl()) continue;
2117 const SUnit *PredSU = I->getSUnit();
2118 if (PredSU->getNode() &&
2119 PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2120 unsigned Reg =
2121 cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2122 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2123 RetVal = true;
2124 continue;
2125 }
2126 }
2127 return false;
2128 }
2129 return RetVal;
2130 }
2132 /// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2133 /// CopyToReg to a virtual register. This SU def is probably a liveout and
2134 /// it has no other use. It should be scheduled closer to the terminator.
2135 static bool hasOnlyLiveOutUses(const SUnit *SU) {
2136 bool RetVal = false;
2137 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2138 I != E; ++I) {
2139 if (I->isCtrl()) continue;
2140 const SUnit *SuccSU = I->getSUnit();
2141 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2142 unsigned Reg =
2143 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2144 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2145 RetVal = true;
2146 continue;
2147 }
2148 }
2149 return false;
2150 }
2151 return RetVal;
2152 }
2154 // Set isVRegCycle for a node with only live in opers and live out uses. Also
2155 // set isVRegCycle for its CopyFromReg operands.
2157 // This is only relevant for single-block loops, in which case the VRegCycle
2158 // node is likely an induction variable whose operand and target virtual
2159 // registers should be coalesced (e.g. pre/post increment values). Setting the
2160 // isVRegCycle flag helps the scheduler prioritize other uses of the same
2161 // CopyFromReg so that this node becomes the virtual register "kill". This
2162 // avoids interference between the values live in and out of the block and
2163 // eliminates a copy inside the loop.
2164 static void initVRegCycle(SUnit *SU) {
2165 if (DisableSchedVRegCycle)
2166 return;
2168 if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2169 return;
2171 DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2173 SU->isVRegCycle = true;
2175 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2176 I != E; ++I) {
2177 if (I->isCtrl()) continue;
2178 I->getSUnit()->isVRegCycle = true;
2179 }
2180 }
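// Editor's illustration: in a single-block loop where "iv.next = iv + 1"
// reads iv via CopyFromReg and feeds only a CopyToReg back to the loop
// header, the increment has only live-in operands and live-out uses, so it
// and its CopyFromReg operands are flagged isVRegCycle. Other in-loop uses
// of iv then get priority, letting the increment act as the kill of iv so
// the copy can be coalesced away, as described above.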
2182 // After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2183 // CopyFromReg operands. We should no longer penalize other uses of this VReg.
2184 static void resetVRegCycle(SUnit *SU) {
2185 if (!SU->isVRegCycle)
2186 return;
2188 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2189 I != E; ++I) {
2190 if (I->isCtrl()) continue; // ignore chain preds
2191 SUnit *PredSU = I->getSUnit();
2192 if (PredSU->isVRegCycle) {
2193 assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2194 "VRegCycle def must be CopyFromReg");
2195 I->getSUnit()->isVRegCycle = 0;
2196 }
2197 }
2198 }
2200 // Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2201 // means a node that defines the VRegCycle has not been scheduled yet.
2202 static bool hasVRegCycleUse(const SUnit *SU) {
2203 // If this SU also defines the VReg, don't hoist it as a "use".
2204 if (SU->isVRegCycle)
2205 return false;
2207 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2208 I != E; ++I) {
2209 if (I->isCtrl()) continue; // ignore chain preds
2210 if (I->getSUnit()->isVRegCycle &&
2211 I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2212 DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
2213 return true;
2214 }
2215 }
2216 return false;
2217 }
2219 // Check for either a dependence (latency) or resource (hazard) stall.
2221 // Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2222 static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2223 if ((int)SPQ->getCurCycle() < Height) return true;
2224 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2225 != ScheduleHazardRecognizer::NoHazard)
2226 return true;
2227 return false;
2228 }
2230 // Return -1 if left has higher priority, 1 if right has higher priority.
2231 // Return 0 if latency-based priority is equivalent.
2232 static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2233 RegReductionPQBase *SPQ) {
2234 // Scheduling an instruction that uses a VReg whose postincrement has not yet
2235 // been scheduled will induce a copy. Model this as an extra cycle of latency.
2236 int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2237 int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2238 int LHeight = (int)left->getHeight() + LPenalty;
2239 int RHeight = (int)right->getHeight() + RPenalty;
2241 bool LStall = (!checkPref || left->SchedulingPref == Sched::Latency) &&
2242 BUHasStall(left, LHeight, SPQ);
2243 bool RStall = (!checkPref || right->SchedulingPref == Sched::Latency) &&
2244 BUHasStall(right, RHeight, SPQ);
2246 // If scheduling one of the nodes will cause a pipeline stall, delay it.
2247 // If scheduling both will cause pipeline stalls, sort them according to
2248 // their height.
2249 if (LStall) {
2250 if (!RStall) {
2251 DEBUG(++FactorCount[FactStall]);
2252 return 1;
2253 }
2254 if (LHeight != RHeight) {
2255 DEBUG(++FactorCount[FactStall]);
2256 return LHeight > RHeight ? 1 : -1;
2257 }
2258 } else if (RStall) {
2259 DEBUG(++FactorCount[FactStall]);
2260 return -1;
2261 }
2263 // If either node is scheduling for latency, sort them by height/depth
2264 // and latency.
2265 if (!checkPref || (left->SchedulingPref == Sched::Latency ||
2266 right->SchedulingPref == Sched::Latency)) {
2267 if (DisableSchedCycles) {
2268 if (LHeight != RHeight) {
2269 DEBUG(++FactorCount[FactHeight]);
2270 return LHeight > RHeight ? 1 : -1;
2271 }
2272 }
2273 else {
2274 // If neither instruction stalls (!LStall && !RStall) then
2275 // their height is already covered, so only depth matters. We also reach
2276 // this branch if both stall but have the same height.
2277 int LDepth = left->getDepth() - LPenalty;
2278 int RDepth = right->getDepth() - RPenalty;
2279 if (LDepth != RDepth) {
2280 DEBUG(++FactorCount[FactDepth]);
2281 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2282 << ") depth " << LDepth << " vs SU (" << right->NodeNum
2283 << ") depth " << RDepth << "\n");
2284 return LDepth < RDepth ? 1 : -1;
2285 }
2286 }
2287 if (left->Latency != right->Latency) {
2288 DEBUG(++FactorCount[FactOther]);
2289 return left->Latency > right->Latency ? 1 : -1;
2290 }
2291 }
2292 return 0;
2293 }
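// Editor's usage sketch: callers treat this like a three-way compare, e.g.
//   int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
//   if (result != 0)
//     return result > 0;
// so a positive result ranks right ahead of left in the priority queue.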
2295 static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2296 // Schedule physical register definitions close to their use. This is
2297 // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2298 // long as shortening physreg live ranges is generally good, we can defer
2299 // creating a subtarget hook.
2300 if (!DisableSchedPhysRegJoin) {
2301 bool LHasPhysReg = left->hasPhysRegDefs;
2302 bool RHasPhysReg = right->hasPhysRegDefs;
2303 if (LHasPhysReg != RHasPhysReg) {
2304 DEBUG(++FactorCount[FactRegUses]);
2305 #ifndef NDEBUG
2306 const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
2307 #endif
2308 DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
2309 << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2310 << PhysRegMsg[RHasPhysReg] << "\n");
2311 return LHasPhysReg < RHasPhysReg;
2312 }
2313 }
2315 // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2316 unsigned LPriority = SPQ->getNodePriority(left);
2317 unsigned RPriority = SPQ->getNodePriority(right);
2319 // Be really careful about hoisting call operands above previous calls.
2320 // Only allow it if it would reduce register pressure.
2321 if (left->isCall && right->isCallOp) {
2322 unsigned RNumVals = right->getNode()->getNumValues();
2323 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2324 }
2325 if (right->isCall && left->isCallOp) {
2326 unsigned LNumVals = left->getNode()->getNumValues();
2327 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2328 }
2330 if (LPriority != RPriority) {
2331 DEBUG(++FactorCount[FactStatic]);
2332 return LPriority > RPriority;
2333 }
2335 // If one or both of the nodes are calls and their Sethi-Ullman numbers
2336 // are the same, then keep source order.
2337 if (left->isCall || right->isCall) {
2338 unsigned LOrder = SPQ->getNodeOrdering(left);
2339 unsigned ROrder = SPQ->getNodeOrdering(right);
2341 // Prefer an ordering where the lower the non-zero order number, the higher
2342 // the priority.
2343 if ((LOrder || ROrder) && LOrder != ROrder)
2344 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2345 }
2347 // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
2348 // e.g.
2349 // t1 = op t2, c1
2350 // t3 = op t4, c2
2351 //
2352 // and the following instructions are both ready.
2353 // t2 = op c3
2354 // t4 = op c4
2355 //
2356 // Then schedule t2 = op first.
2357 // i.e.
2358 // t4 = op c4
2359 // t2 = op c3
2360 // t1 = op t2, c1
2361 // t3 = op t4, c2
2362 //
2363 // This creates more short live intervals.
2364 unsigned LDist = closestSucc(left);
2365 unsigned RDist = closestSucc(right);
2366 if (LDist != RDist) {
2367 DEBUG(++FactorCount[FactOther]);
2368 return LDist < RDist;
2369 }
2371 // How many registers become live when the node is scheduled.
2372 unsigned LScratch = calcMaxScratches(left);
2373 unsigned RScratch = calcMaxScratches(right);
2374 if (LScratch != RScratch) {
2375 DEBUG(++FactorCount[FactOther]);
2376 return LScratch > RScratch;
2377 }
2379 // Comparing latency against a call makes little sense unless the node
2380 // is register pressure-neutral.
2381 if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2382 return (left->NodeQueueId > right->NodeQueueId);
2384 // Do not compare latencies when one or both of the nodes are calls.
2385 if (!DisableSchedCycles &&
2386 !(left->isCall || right->isCall)) {
2387 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2388 if (result != 0)
2389 return result > 0;
2390 }
2391 else {
2392 if (left->getHeight() != right->getHeight()) {
2393 DEBUG(++FactorCount[FactHeight]);
2394 return left->getHeight() > right->getHeight();
2395 }
2397 if (left->getDepth() != right->getDepth()) {
2398 DEBUG(++FactorCount[FactDepth]);
2399 return left->getDepth() < right->getDepth();
2400 }
2401 }
2403 assert(left->NodeQueueId && right->NodeQueueId &&
2404 "NodeQueueId cannot be zero");
2405 DEBUG(++FactorCount[FactOther]);
2406 return (left->NodeQueueId > right->NodeQueueId);
2407 }
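// Editor's summary of the BURRSort tie-break order: physreg defs, static
// Sethi-Ullman priority (adjusted for call operands), source order between
// calls, closest-successor distance, scratch-register count, latency (only
// when neither node is a call), then height, depth, and finally NodeQueueId.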
2410 bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2411 if (int res = checkSpecialNodes(left, right))
2412 return res > 0;
2414 return BURRSort(left, right, SPQ);
2415 }
2417 // Source order, otherwise bottom up.
2418 bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2419 if (int res = checkSpecialNodes(left, right))
2420 return res > 0;
2422 unsigned LOrder = SPQ->getNodeOrdering(left);
2423 unsigned ROrder = SPQ->getNodeOrdering(right);
2425 // Prefer an ordering where the lower the non-zero order number, the higher
2426 // the priority.
2427 if ((LOrder || ROrder) && LOrder != ROrder)
2428 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2430 return BURRSort(left, right, SPQ);
2431 }
2433 // If the time between now and when the instruction will be ready can cover
2434 // the spill code, then avoid adding it to the ready queue. This gives long
2435 // stalls highest priority and allows hoisting across calls. It should also
2436 // speed up processing the available queue.
2437 bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2438 static const unsigned ReadyDelay = 3;
2440 if (SPQ->MayReduceRegPressure(SU)) return true;
2442 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2444 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2445 != ScheduleHazardRecognizer::NoHazard)
2446 return false;
2448 return true;
2449 }
2451 // Return true if right should be scheduled with higher priority than left.
2452 bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2453 if (int res = checkSpecialNodes(left, right))
2454 return res > 0;
2456 if (left->isCall || right->isCall)
2457 // No way to compute latency of calls.
2458 return BURRSort(left, right, SPQ);
2460 bool LHigh = SPQ->HighRegPressure(left);
2461 bool RHigh = SPQ->HighRegPressure(right);
2462 // Avoid causing spills. If register pressure is high, schedule for
2463 // register pressure reduction.
2464 if (LHigh && !RHigh) {
2465 DEBUG(++FactorCount[FactPressureDiff]);
2466 DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
2467 << right->NodeNum << ")\n");
2468 return true;
2469 }
2470 else if (!LHigh && RHigh) {
2471 DEBUG(++FactorCount[FactPressureDiff]);
2472 DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
2473 << left->NodeNum << ")\n");
2474 return false;
2475 }
2476 if (!LHigh && !RHigh) {
2477 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2478 if (result != 0)
2479 return result > 0;
2480 }
2481 return BURRSort(left, right, SPQ);
2482 }
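// Editor's summary of the hybrid policy: a node under high register pressure
// loses to one that is not; when both are pressure-safe, latency
// (BUCompareLatency with checkPref) is consulted; BURRSort settles whatever
// remains, including the case where both nodes are under pressure.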
2484 // Schedule as many instructions in each cycle as possible. So don't make an
2485 // instruction available unless it is ready in the current cycle.
2486 bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2487 if (SU->getHeight() > CurCycle) return false;
2489 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2490 != ScheduleHazardRecognizer::NoHazard)
2491 return false;
2493 return true;
2494 }
2496 static bool canEnableCoalescing(SUnit *SU) {
2497 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2498 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2499 // CopyToReg should be close to its uses to facilitate coalescing and
2500 // avoid spilling.
2501 return true;
2503 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2504 Opc == TargetOpcode::SUBREG_TO_REG ||
2505 Opc == TargetOpcode::INSERT_SUBREG)
2506 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2507 // close to their uses to facilitate coalescing.
2508 return true;
2510 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2511 // If SU does not have a register def, schedule it close to its uses
2512 // because it does not lengthen any live ranges.
2513 return true;
2515 return false;
2516 }
2518 // list-ilp is currently an experimental scheduler that allows various
2519 // heuristics to be enabled prior to the normal register reduction logic.
2520 bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2521 if (int res = checkSpecialNodes(left, right))
2522 return res > 0;
2524 if (left->isCall || right->isCall)
2525 // No way to compute latency of calls.
2526 return BURRSort(left, right, SPQ);
2528 unsigned LLiveUses = 0, RLiveUses = 0;
2529 int LPDiff = 0, RPDiff = 0;
2530 if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2531 LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2532 RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2533 }
2534 if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2535 DEBUG(++FactorCount[FactPressureDiff]);
2536 DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2537 << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2538 return LPDiff > RPDiff;
2539 }
2541 if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2542 bool LReduce = canEnableCoalescing(left);
2543 bool RReduce = canEnableCoalescing(right);
2544 DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
2545 if (LReduce && !RReduce) return false;
2546 if (RReduce && !LReduce) return true;
2547 }
2549 if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2550 DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2551 << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2552 DEBUG(++FactorCount[FactRegUses]);
2553 return LLiveUses < RLiveUses;
2554 }
2556 if (!DisableSchedStalls) {
2557 bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2558 bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2559 if (LStall != RStall) {
2560 DEBUG(++FactorCount[FactHeight]);
2561 return left->getHeight() > right->getHeight();
2562 }
2563 }
2565 if (!DisableSchedCriticalPath) {
2566 int spread = (int)left->getDepth() - (int)right->getDepth();
2567 if (std::abs(spread) > MaxReorderWindow) {
2568 DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2569 << left->getDepth() << " != SU(" << right->NodeNum << "): "
2570 << right->getDepth() << "\n");
2571 DEBUG(++FactorCount[FactDepth]);
2572 return left->getDepth() < right->getDepth();
2573 }
2574 }
2576 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2577 int spread = (int)left->getHeight() - (int)right->getHeight();
2578 if (std::abs(spread) > MaxReorderWindow) {
2579 DEBUG(++FactorCount[FactHeight]);
2580 return left->getHeight() > right->getHeight();
2581 }
2582 }
2584 return BURRSort(left, right, SPQ);
2585 }
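// Editor's summary of the list-ilp heuristic order: register pressure diff,
// coalescing opportunity, live-use count, stalls, critical path (depth),
// height, and finally the standard BURRSort tie-breakers; each stage can be
// toggled off with its corresponding -disable-sched-* flag while these
// heuristics are being tuned.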
2587 void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2588 SUnits = &sunits;
2589 // Add pseudo dependency edges for two-address nodes.
2590 AddPseudoTwoAddrDeps();
2591 // Reroute edges to nodes with multiple uses.
2592 if (!TracksRegPressure)
2593 PrescheduleNodesWithMultipleUses();
2594 // Calculate node priorities.
2595 CalculateSethiUllmanNumbers();
2597 // For single block loops, mark nodes that look like canonical IV increments.
2598 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
2599 for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
2600 initVRegCycle(&sunits[i]);
2601 }
2602 }
2603 }
2605 //===----------------------------------------------------------------------===//
2606 // Preschedule for Register Pressure
2607 //===----------------------------------------------------------------------===//
2609 bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2610 if (SU->isTwoAddress) {
2611 unsigned Opc = SU->getNode()->getMachineOpcode();
2612 const TargetInstrDesc &TID = TII->get(Opc);
2613 unsigned NumRes = TID.getNumDefs();
2614 unsigned NumOps = TID.getNumOperands() - NumRes;
2615 for (unsigned i = 0; i != NumOps; ++i) {
2616 if (TID.getOperandConstraint(i+NumRes, TOI::TIED_TO) != -1) {
2617 SDNode *DU = SU->getNode()->getOperand(i).getNode();
2618 if (DU->getNodeId() != -1 &&
2619 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2620 return true;
2621 }
2622 }
2623 }
2624 return false;
2625 }
2627 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2628 /// physical register defs.
2629 static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2630 const TargetInstrInfo *TII,
2631 const TargetRegisterInfo *TRI) {
2632 SDNode *N = SuccSU->getNode();
2633 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2634 const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2635 assert(ImpDefs && "Caller should check hasPhysRegDefs");
2636 for (const SDNode *SUNode = SU->getNode(); SUNode;
2637 SUNode = SUNode->getGluedNode()) {
2638 if (!SUNode->isMachineOpcode())
2639 continue;
2640 const unsigned *SUImpDefs =
2641 TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2642 if (!SUImpDefs)
2643 return false;
2644 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2645 EVT VT = N->getValueType(i);
2646 if (VT == MVT::Glue || VT == MVT::Other)
2647 continue;
2648 if (!N->hasAnyUseOfValue(i))
2649 continue;
2650 unsigned Reg = ImpDefs[i - NumDefs];
2651 for (;*SUImpDefs; ++SUImpDefs) {
2652 unsigned SUReg = *SUImpDefs;
2653 if (TRI->regsOverlap(Reg, SUReg))
2654 return true;
2655 }
2656 }
2657 }
2658 return false;
2659 }
2661 /// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2662 /// are not handled well by the general register pressure reduction
2663 /// heuristics. When presented with code like this:
2664 ///
2665 ///      N
2666 ///    / |
2667 ///   /  |
2668 ///  U  store
2669 ///  |
2670 /// ...
2671 ///
2672 /// the heuristics tend to push the store up, but since the
2673 /// operand of the store has another use (U), this would increase
2674 /// the length of that other use (the U->N edge).
2676 /// This function transforms code like the above to route U's
2677 /// dependence through the store when possible, like this:
2678 ///
2679 ///      N
2680 ///      ||
2681 ///      ||
2682 ///     store
2683 ///       |
2684 ///       U
2685 ///       |
2686 ///      ...
2687 ///
2688 /// This results in the store being scheduled immediately
2689 /// after N, which shortens the U->N live range, reducing
2690 /// register pressure.
2692 void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2693 // Visit all the nodes in topological order, working top-down.
2694 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2695 SUnit *SU = &(*SUnits)[i];
2696 // For now, only look at nodes with no data successors, such as stores.
2697 // These are especially important, due to the heuristics in
2698 // getNodePriority for nodes with no data successors.
2699 if (SU->NumSuccs != 0)
2700 continue;
2701 // For now, only look at nodes with exactly one data predecessor.
2702 if (SU->NumPreds != 1)
2703 continue;
2704 // Avoid prescheduling copies to virtual registers, which don't behave
2705 // like other nodes from the perspective of scheduling heuristics.
2706 if (SDNode *N = SU->getNode())
2707 if (N->getOpcode() == ISD::CopyToReg &&
2708 TargetRegisterInfo::isVirtualRegister
2709 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2710 continue;
2712 // Locate the single data predecessor.
2713 SUnit *PredSU = 0;
2714 for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2715 EE = SU->Preds.end(); II != EE; ++II)
2716 if (!II->isCtrl()) {
2717 PredSU = II->getSUnit();
2718 break;
2719 }
2720 assert(PredSU);
2722 // Don't rewrite edges that carry physregs, because that requires additional
2723 // support infrastructure.
2724 if (PredSU->hasPhysRegDefs)
2725 continue;
2726 // Short-circuit the case where SU is PredSU's only data successor.
2727 if (PredSU->NumSuccs == 1)
2728 continue;
2729 // Avoid prescheduling to copies from virtual registers, which don't behave
2730 // like other nodes from the perspective of scheduling heuristics.
2731 if (SDNode *N = SU->getNode())
2732 if (N->getOpcode() == ISD::CopyFromReg &&
2733 TargetRegisterInfo::isVirtualRegister
2734 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2735 continue;
2737 // Perform checks on the successors of PredSU.
2738 for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2739 EE = PredSU->Succs.end(); II != EE; ++II) {
2740 SUnit *PredSuccSU = II->getSUnit();
2741 if (PredSuccSU == SU) continue;
2742 // If PredSU has another successor with no data successors, for
2743 // now don't attempt to choose either over the other.
2744 if (PredSuccSU->NumSuccs == 0)
2745 goto outer_loop_continue;
2746 // Don't break physical register dependencies.
2747 if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2748 if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2749 goto outer_loop_continue;
2750 // Don't introduce graph cycles.
2751 if (scheduleDAG->IsReachable(SU, PredSuccSU))
2752 goto outer_loop_continue;
2753 }
2755 // Ok, the transformation is safe and the heuristics suggest it is
2756 // profitable. Update the graph.
2757 DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
2758 << " next to PredSU #" << PredSU->NodeNum
2759 << " to guide scheduling in the presence of multiple uses\n");
2760 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2761 SDep Edge = PredSU->Succs[i];
2762 assert(!Edge.isAssignedRegDep());
2763 SUnit *SuccSU = Edge.getSUnit();
2764 if (SuccSU != SU) {
2765 Edge.setSUnit(PredSU);
2766 scheduleDAG->RemovePred(SuccSU, Edge);
2767 scheduleDAG->AddPred(SU, Edge);
2768 Edge.setSUnit(SU);
2769 scheduleDAG->AddPred(SuccSU, Edge);
2770 --i;
2771 }
2772 }
2773 outer_loop_continue:;
2774 }
2775 }
2777 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2778 /// it as a def&use operand, add a pseudo control edge from it to the other
2779 /// node (if it won't create a cycle) so the two-address one will be scheduled
2780 /// first (lower in the schedule). If both nodes are two-address, favor the
2781 /// one that has a CopyToReg use (more likely to be a loop induction update).
2782 /// If both are two-address, but one is commutable while the other is not
2783 /// commutable, favor the one that's not commutable.
2784 void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2785 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2786 SUnit *SU = &(*SUnits)[i];
2787 if (!SU->isTwoAddress)
2788 continue;
2790 SDNode *Node = SU->getNode();
2791 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
2792 continue;
2794 bool isLiveOut = hasOnlyLiveOutUses(SU);
2795 unsigned Opc = Node->getMachineOpcode();
2796 const TargetInstrDesc &TID = TII->get(Opc);
2797 unsigned NumRes = TID.getNumDefs();
2798 unsigned NumOps = TID.getNumOperands() - NumRes;
2799 for (unsigned j = 0; j != NumOps; ++j) {
2800 if (TID.getOperandConstraint(j+NumRes, TOI::TIED_TO) == -1)
2801 continue;
2802 SDNode *DU = SU->getNode()->getOperand(j).getNode();
2803 if (DU->getNodeId() == -1)
2804 continue;
2805 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2806 if (!DUSU) continue;
2807 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
2808 E = DUSU->Succs.end(); I != E; ++I) {
2809 if (I->isCtrl()) continue;
2810 SUnit *SuccSU = I->getSUnit();
2811 if (SuccSU == SU)
2812 continue;
2813 // Be conservative. Ignore if nodes aren't at roughly the same
2814 // depth and height.
2815 if (SuccSU->getHeight() < SU->getHeight() &&
2816 (SU->getHeight() - SuccSU->getHeight()) > 1)
2817 continue;
2818 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2819 // constrains whatever is using the copy, instead of the copy
2820 // itself. In the case that the copy is coalesced, this
2821 // preserves the intent of the pseudo two-address heuristics.
2822 while (SuccSU->Succs.size() == 1 &&
2823 SuccSU->getNode()->isMachineOpcode() &&
2824 SuccSU->getNode()->getMachineOpcode() ==
2825 TargetOpcode::COPY_TO_REGCLASS)
2826 SuccSU = SuccSU->Succs.front().getSUnit();
2827 // Don't constrain non-instruction nodes.
2828 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2829 continue;
2830 // Don't constrain nodes with physical register defs if the
2831 // predecessor can clobber them.
2832 if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
2833 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
2834 continue;
2835 }
2836 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2837 // these may be coalesced away. We want them close to their uses.
2838 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
2839 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2840 SuccOpc == TargetOpcode::INSERT_SUBREG ||
2841 SuccOpc == TargetOpcode::SUBREG_TO_REG)
2842 continue;
2843 if ((!canClobber(SuccSU, DUSU) ||
2844 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
2845 (!SU->isCommutable && SuccSU->isCommutable)) &&
2846 !scheduleDAG->IsReachable(SuccSU, SU)) {
2847 DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
2848 << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
2849 scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
2850 /*Reg=*/0, /*isNormalMemory=*/false,
2851 /*isMustAlias=*/false,
2852 /*isArtificial=*/true));
2853 }
2854 }
2855 }
2856 }
2857 }
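// Editor's illustration (hypothetical values): given the two-address node
// "t1 = add t0, 1", whose result must reuse t0's register, and another user
// "t2 = sub t0, t3", the artificial edge makes the add a successor of the
// sub in the DAG, so the add lands below the sub in the final schedule and
// cannot clobber t0 while the sub still needs it.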
2859 /// LimitedSumOfUnscheduledPredsOfSuccs - Compute the sum of the unscheduled
2860 /// predecessors of the successors of the SUnit SU. Stop when the provided
2861 /// limit is exceeded.
2862 static unsigned LimitedSumOfUnscheduledPredsOfSuccs(const SUnit *SU,
2863 unsigned Limit) {
2864 unsigned Sum = 0;
2865 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2866 I != E; ++I) {
2867 const SUnit *SuccSU = I->getSUnit();
2868 for (SUnit::const_pred_iterator II = SuccSU->Preds.begin(),
2869 EE = SuccSU->Preds.end(); II != EE; ++II) {
2870 SUnit *PredSU = II->getSUnit();
2871 if (!PredSU->isScheduled)
2872 if (++Sum > Limit)
2873 return Sum;
2874 }
2875 }
2876 return Sum;
2877 }
2881 bool td_ls_rr_sort::operator()(const SUnit *left, const SUnit *right) const {
2882 if (int res = checkSpecialNodes(left, right))
2883 return res < 0;
2885 unsigned LPriority = SPQ->getNodePriority(left);
2886 unsigned RPriority = SPQ->getNodePriority(right);
2887 bool LIsTarget = left->getNode() && left->getNode()->isMachineOpcode();
2888 bool RIsTarget = right->getNode() && right->getNode()->isMachineOpcode();
2889 bool LIsFloater = LIsTarget && left->NumPreds == 0;
2890 bool RIsFloater = RIsTarget && right->NumPreds == 0;
2891 unsigned LBonus = (LimitedSumOfUnscheduledPredsOfSuccs(left,1) == 1) ? 2 : 0;
2892 unsigned RBonus = (LimitedSumOfUnscheduledPredsOfSuccs(right,1) == 1) ? 2 : 0;
2894 if (left->NumSuccs == 0 && right->NumSuccs != 0)
2895 return false;
2896 else if (left->NumSuccs != 0 && right->NumSuccs == 0)
2897 return true;
2899 if (LIsFloater)
2900 LBonus -= 2;
2901 if (RIsFloater)
2902 RBonus -= 2;
2903 if (left->NumSuccs == 1)
2904 LBonus += 2;
2905 if (right->NumSuccs == 1)
2906 RBonus += 2;
2908 if (LPriority+LBonus != RPriority+RBonus)
2909 return LPriority+LBonus < RPriority+RBonus;
2911 if (left->getDepth() != right->getDepth())
2912 return left->getDepth() < right->getDepth();
2914 if (left->NumSuccsLeft != right->NumSuccsLeft)
2915 return left->NumSuccsLeft > right->NumSuccsLeft;
2917 assert(left->NodeQueueId && right->NodeQueueId &&
2918 "NodeQueueId cannot be zero");
2919 return (left->NodeQueueId > right->NodeQueueId);
2920 }
2922 //===----------------------------------------------------------------------===//
2923 // Public Constructor Functions
2924 //===----------------------------------------------------------------------===//
2926 llvm::ScheduleDAGSDNodes *
2927 llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2928 CodeGenOpt::Level OptLevel) {
2929 const TargetMachine &TM = IS->TM;
2930 const TargetInstrInfo *TII = TM.getInstrInfo();
2931 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2933 BURegReductionPriorityQueue *PQ =
2934 new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2935 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2936 PQ->setScheduleDAG(SD);
2937 return SD;
2938 }
2940 llvm::ScheduleDAGSDNodes *
2941 llvm::createTDRRListDAGScheduler(SelectionDAGISel *IS,
2942 CodeGenOpt::Level OptLevel) {
2943 const TargetMachine &TM = IS->TM;
2944 const TargetInstrInfo *TII = TM.getInstrInfo();
2945 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2947 TDRegReductionPriorityQueue *PQ =
2948 new TDRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2949 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2950 PQ->setScheduleDAG(SD);
2951 return SD;
2952 }
2954 llvm::ScheduleDAGSDNodes *
2955 llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
2956 CodeGenOpt::Level OptLevel) {
2957 const TargetMachine &TM = IS->TM;
2958 const TargetInstrInfo *TII = TM.getInstrInfo();
2959 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2961 SrcRegReductionPriorityQueue *PQ =
2962 new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2963 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2964 PQ->setScheduleDAG(SD);
2965 return SD;
2966 }
2968 llvm::ScheduleDAGSDNodes *
2969 llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
2970 CodeGenOpt::Level OptLevel) {
2971 const TargetMachine &TM = IS->TM;
2972 const TargetInstrInfo *TII = TM.getInstrInfo();
2973 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2974 const TargetLowering *TLI = &IS->getTargetLowering();
2976 HybridBURRPriorityQueue *PQ =
2977 new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
2979 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
2980 PQ->setScheduleDAG(SD);
2981 return SD;
2982 }
2984 llvm::ScheduleDAGSDNodes *
2985 llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
2986 CodeGenOpt::Level OptLevel) {
2987 const TargetMachine &TM = IS->TM;
2988 const TargetInstrInfo *TII = TM.getInstrInfo();
2989 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2990 const TargetLowering *TLI = &IS->getTargetLowering();
2992 ILPBURRPriorityQueue *PQ =
2993 new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
2994 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
2995 PQ->setScheduleDAG(SD);
2996 return SD;
2997 }
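// Editor's note: each factory above wires a priority queue variant to a
// ScheduleDAGRRList. Judging from the arguments, the pressure-aware hybrid
// and ILP schedulers pass 'true' and a TargetLowering pointer so their
// queues can track register pressure, while burr/tdrr/source pass 'false'
// and a null TLI.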