1 //===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This implements bottom-up and top-down register pressure reduction list
11 // schedulers, using standard algorithms. The basic approach uses a priority
12 // queue of available nodes to schedule. One at a time, nodes are taken from
13 // the priority queue (thus in priority order), checked for legality to
14 // schedule, and emitted if legal.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "pre-RA-sched"
19 #include "ScheduleDAGSDNodes.h"
20 #include "llvm/InlineAsm.h"
21 #include "llvm/CodeGen/SchedulerRegistry.h"
22 #include "llvm/CodeGen/SelectionDAGISel.h"
23 #include "llvm/CodeGen/ScheduleHazardRecognizer.h"
24 #include "llvm/Target/TargetRegisterInfo.h"
25 #include "llvm/Target/TargetData.h"
26 #include "llvm/Target/TargetMachine.h"
27 #include "llvm/Target/TargetInstrInfo.h"
28 #include "llvm/Target/TargetLowering.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/ADT/Statistic.h"
31 #include "llvm/ADT/STLExtras.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
38 STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
39 STATISTIC(NumUnfolds, "Number of nodes unfolded");
40 STATISTIC(NumDups, "Number of duplicated nodes");
41 STATISTIC(NumPRCopies, "Number of physical register copies");
43 static RegisterScheduler
44 burrListDAGScheduler("list-burr",
45 "Bottom-up register reduction list scheduling",
46 createBURRListDAGScheduler);
47 static RegisterScheduler
48 sourceListDAGScheduler("source",
49 "Similar to list-burr but schedules in source "
50 "order when possible",
51 createSourceListDAGScheduler);
53 static RegisterScheduler
54 hybridListDAGScheduler("list-hybrid",
55 "Bottom-up register pressure aware list scheduling "
56 "which tries to balance latency and register pressure",
57 createHybridListDAGScheduler);
59 static RegisterScheduler
60 ILPListDAGScheduler("list-ilp",
61 "Bottom-up register pressure aware list scheduling "
62 "which tries to balance ILP and register pressure",
63 createILPListDAGScheduler);
65 static cl::opt<bool> DisableSchedCycles(
66 "disable-sched-cycles", cl::Hidden, cl::init(false),
67 cl::desc("Disable cycle-level precision during preRA scheduling"));
69 // Temporary sched=list-ilp flags until the heuristics are robust.
70 // Some options are also available under sched=list-hybrid.
71 static cl::opt<bool> DisableSchedRegPressure(
72 "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
73 cl::desc("Disable regpressure priority in sched=list-ilp"));
74 static cl::opt<bool> DisableSchedLiveUses(
75 "disable-sched-live-uses", cl::Hidden, cl::init(true),
76 cl::desc("Disable live use priority in sched=list-ilp"));
77 static cl::opt<bool> DisableSchedVRegCycle(
78 "disable-sched-vrcycle", cl::Hidden, cl::init(false),
79 cl::desc("Disable virtual register cycle interference checks"));
80 static cl::opt<bool> DisableSchedPhysRegJoin(
81 "disable-sched-physreg-join", cl::Hidden, cl::init(false),
82 cl::desc("Disable physreg def-use affinity"));
83 static cl::opt<bool> DisableSchedStalls(
84 "disable-sched-stalls", cl::Hidden, cl::init(true),
85 cl::desc("Disable no-stall priority in sched=list-ilp"));
86 static cl::opt<bool> DisableSchedCriticalPath(
87 "disable-sched-critical-path", cl::Hidden, cl::init(false),
88 cl::desc("Disable critical path priority in sched=list-ilp"));
89 static cl::opt<bool> DisableSchedHeight(
90 "disable-sched-height", cl::Hidden, cl::init(false),
91 cl::desc("Disable scheduled-height priority in sched=list-ilp"));
92 static cl::opt<bool> Disable2AddrHack(
93 "disable-2addr-hack", cl::Hidden, cl::init(true),
94 cl::desc("Disable scheduler's two-address hack"));
96 static cl::opt<int> MaxReorderWindow(
97 "max-sched-reorder", cl::Hidden, cl::init(6),
98 cl::desc("Number of instructions to allow ahead of the critical path "
99 "in sched=list-ilp"));
101 static cl::opt<unsigned> AvgIPC(
102 "sched-avg-ipc", cl::Hidden, cl::init(1),
103 cl::desc("Average inst/cycle whan no target itinerary exists."));
107 // For sched=list-ilp, Count the number of times each factor comes into play.
108 enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
109 FactStatic, FactOther, NumFactors };
111 static const char *FactorName[NumFactors] =
112 {"PressureDiff", "RegUses", "Stall", "Height", "Depth","Static", "Other"};
113 static int FactorCount[NumFactors];
117 //===----------------------------------------------------------------------===//
118 /// ScheduleDAGRRList - The actual register reduction list scheduler
119 /// implementation. This supports both top-down and bottom-up scheduling.
121 class ScheduleDAGRRList : public ScheduleDAGSDNodes {
123 /// NeedLatency - True if the scheduler will make use of latency information.
127 /// AvailableQueue - The priority queue to use for the available SUnits.
128 SchedulingPriorityQueue *AvailableQueue;
130 /// PendingQueue - This contains all of the instructions whose operands have
131 /// been issued, but their results are not ready yet (due to the latency of
132 /// the operation). Once the operands becomes available, the instruction is
133 /// added to the AvailableQueue.
134 std::vector<SUnit*> PendingQueue;
136 /// HazardRec - The hazard recognizer to use.
137 ScheduleHazardRecognizer *HazardRec;
139 /// CurCycle - The current scheduler state corresponds to this cycle.
142 /// MinAvailableCycle - Cycle of the soonest available instruction.
143 unsigned MinAvailableCycle;
145 /// IssueCount - Count instructions issued in this cycle
146 /// Currently valid only for bottom-up scheduling.
149 /// LiveRegDefs - A set of physical registers and their definition
150 /// that are "live". These nodes must be scheduled before any other nodes that
151 /// modifies the registers can be scheduled.
152 unsigned NumLiveRegs;
153 std::vector<SUnit*> LiveRegDefs;
154 std::vector<SUnit*> LiveRegGens;
156 /// Topo - A topological ordering for SUnits which permits fast IsReachable
157 /// and similar queries.
158 ScheduleDAGTopologicalSort Topo;
161 ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
162 SchedulingPriorityQueue *availqueue,
163 CodeGenOpt::Level OptLevel)
164 : ScheduleDAGSDNodes(mf),
165 NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
168 const TargetMachine &tm = mf.getTarget();
169 if (DisableSchedCycles || !NeedLatency)
170 HazardRec = new ScheduleHazardRecognizer();
172 HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
175 ~ScheduleDAGRRList() {
177 delete AvailableQueue;
182 ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }
184 /// IsReachable - Checks if SU is reachable from TargetSU.
185 bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
186 return Topo.IsReachable(SU, TargetSU);
189 /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
191 bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
192 return Topo.WillCreateCycle(SU, TargetSU);
195 /// AddPred - adds a predecessor edge to SUnit SU.
196 /// This returns true if this is a new predecessor.
197 /// Updates the topological ordering if required.
198 void AddPred(SUnit *SU, const SDep &D) {
199 Topo.AddPred(SU, D.getSUnit());
203 /// RemovePred - removes a predecessor edge from SUnit SU.
204 /// This returns true if an edge was removed.
205 /// Updates the topological ordering if required.
206 void RemovePred(SUnit *SU, const SDep &D) {
207 Topo.RemovePred(SU, D.getSUnit());
212 bool isReady(SUnit *SU) {
213 return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
214 AvailableQueue->isReady(SU);
217 void ReleasePred(SUnit *SU, const SDep *PredEdge);
218 void ReleasePredecessors(SUnit *SU);
219 void ReleasePending();
220 void AdvanceToCycle(unsigned NextCycle);
221 void AdvancePastStalls(SUnit *SU);
222 void EmitNode(SUnit *SU);
223 void ScheduleNodeBottomUp(SUnit*);
224 void CapturePred(SDep *PredEdge);
225 void UnscheduleNodeBottomUp(SUnit*);
226 void RestoreHazardCheckerBottomUp();
227 void BacktrackBottomUp(SUnit*, SUnit*);
228 SUnit *CopyAndMoveSuccessors(SUnit*);
229 void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
230 const TargetRegisterClass*,
231 const TargetRegisterClass*,
232 SmallVector<SUnit*, 2>&);
233 bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);
235 SUnit *PickNodeToScheduleBottomUp();
236 void ListScheduleBottomUp();
238 /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
239 /// Updates the topological ordering if required.
240 SUnit *CreateNewSUnit(SDNode *N) {
241 unsigned NumSUnits = SUnits.size();
242 SUnit *NewNode = NewSUnit(N);
243 // Update the topological ordering.
244 if (NewNode->NodeNum >= NumSUnits)
245 Topo.InitDAGTopologicalSorting();
249 /// CreateClone - Creates a new SUnit from an existing one.
250 /// Updates the topological ordering if required.
251 SUnit *CreateClone(SUnit *N) {
252 unsigned NumSUnits = SUnits.size();
253 SUnit *NewNode = Clone(N);
254 // Update the topological ordering.
255 if (NewNode->NodeNum >= NumSUnits)
256 Topo.InitDAGTopologicalSorting();
260 /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
261 /// need actual latency information but the hybrid scheduler does.
262 bool ForceUnitLatencies() const {
266 } // end anonymous namespace
268 /// GetCostForDef - Looks up the register class and cost for a given definition.
269 /// Typically this just means looking up the representative register class,
270 /// but for untyped values (MVT::untyped) it means inspecting the node's
271 /// opcode to determine what register class is being generated.
272 static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
273 const TargetLowering *TLI,
274 const TargetInstrInfo *TII,
275 const TargetRegisterInfo *TRI,
276 unsigned &RegClass, unsigned &Cost) {
277 EVT VT = RegDefPos.GetValue();
279 // Special handling for untyped values. These values can only come from
280 // the expansion of custom DAG-to-DAG patterns.
281 if (VT == MVT::untyped) {
282 const SDNode *Node = RegDefPos.GetNode();
283 unsigned Opcode = Node->getMachineOpcode();
285 if (Opcode == TargetOpcode::REG_SEQUENCE) {
286 unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
287 const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
288 RegClass = RC->getID();
293 unsigned Idx = RegDefPos.GetIdx();
294 const MCInstrDesc Desc = TII->get(Opcode);
295 const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
296 RegClass = RC->getID();
297 // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
298 // better way to determine it.
301 RegClass = TLI->getRepRegClassFor(VT)->getID();
302 Cost = TLI->getRepRegClassCostFor(VT);
306 /// Schedule - Schedule the DAG using list scheduling.
307 void ScheduleDAGRRList::Schedule() {
309 << "********** List Scheduling BB#" << BB->getNumber()
310 << " '" << BB->getName() << "' **********\n");
312 for (int i = 0; i < NumFactors; ++i) {
319 MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
321 // Allocate slots for each physical register, plus one for a special register
322 // to track the virtual resource of a calling sequence.
323 LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
324 LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);
326 // Build the scheduling graph.
327 BuildSchedGraph(NULL);
329 DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
330 SUnits[su].dumpAll(this));
331 Topo.InitDAGTopologicalSorting();
333 AvailableQueue->initNodes(SUnits);
337 // Execute the actual scheduling loop.
338 ListScheduleBottomUp();
341 for (int i = 0; i < NumFactors; ++i) {
342 DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
345 AvailableQueue->releaseState();
348 //===----------------------------------------------------------------------===//
349 // Bottom-Up Scheduling
350 //===----------------------------------------------------------------------===//
352 /// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
353 /// the AvailableQueue if the count reaches zero. Also update its cycle bound.
354 void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
355 SUnit *PredSU = PredEdge->getSUnit();
358 if (PredSU->NumSuccsLeft == 0) {
359 dbgs() << "*** Scheduling failed! ***\n";
361 dbgs() << " has been released too many times!\n";
365 --PredSU->NumSuccsLeft;
367 if (!ForceUnitLatencies()) {
368 // Updating predecessor's height. This is now the cycle when the
369 // predecessor can be scheduled without causing a pipeline stall.
370 PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
373 // If all the node's successors are scheduled, this node is ready
374 // to be scheduled. Ignore the special EntrySU node.
375 if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
376 PredSU->isAvailable = true;
378 unsigned Height = PredSU->getHeight();
379 if (Height < MinAvailableCycle)
380 MinAvailableCycle = Height;
382 if (isReady(PredSU)) {
383 AvailableQueue->push(PredSU);
385 // CapturePred and others may have left the node in the pending queue, avoid
387 else if (!PredSU->isPending) {
388 PredSU->isPending = true;
389 PendingQueue.push_back(PredSU);
394 /// IsChainDependent - Test if Outer is reachable from Inner through
395 /// chain dependencies.
396 static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
398 const TargetInstrInfo *TII) {
403 // For a TokenFactor, examine each operand. There may be multiple ways
404 // to get to the CALLSEQ_BEGIN, but we need to find the path with the
405 // most nesting in order to ensure that we find the corresponding match.
406 if (N->getOpcode() == ISD::TokenFactor) {
407 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
408 if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
412 // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
413 if (N->isMachineOpcode()) {
414 if (N->getMachineOpcode() ==
415 (unsigned)TII->getCallFrameDestroyOpcode()) {
417 } else if (N->getMachineOpcode() ==
418 (unsigned)TII->getCallFrameSetupOpcode()) {
424 // Otherwise, find the chain and continue climbing.
425 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
426 if (N->getOperand(i).getValueType() == MVT::Other) {
427 N = N->getOperand(i).getNode();
428 goto found_chain_operand;
431 found_chain_operand:;
432 if (N->getOpcode() == ISD::EntryToken)
437 /// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
438 /// the corresponding (lowered) CALLSEQ_BEGIN node.
440 /// NestLevel and MaxNested are used in recursion to indcate the current level
441 /// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
442 /// level seen so far.
444 /// TODO: It would be better to give CALLSEQ_END an explicit operand to point
445 /// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
447 FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
448 const TargetInstrInfo *TII) {
450 // For a TokenFactor, examine each operand. There may be multiple ways
451 // to get to the CALLSEQ_BEGIN, but we need to find the path with the
452 // most nesting in order to ensure that we find the corresponding match.
453 if (N->getOpcode() == ISD::TokenFactor) {
455 unsigned BestMaxNest = MaxNest;
456 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
457 unsigned MyNestLevel = NestLevel;
458 unsigned MyMaxNest = MaxNest;
459 if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
460 MyNestLevel, MyMaxNest, TII))
461 if (!Best || (MyMaxNest > BestMaxNest)) {
463 BestMaxNest = MyMaxNest;
467 MaxNest = BestMaxNest;
470 // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
471 if (N->isMachineOpcode()) {
472 if (N->getMachineOpcode() ==
473 (unsigned)TII->getCallFrameDestroyOpcode()) {
475 MaxNest = std::max(MaxNest, NestLevel);
476 } else if (N->getMachineOpcode() ==
477 (unsigned)TII->getCallFrameSetupOpcode()) {
478 assert(NestLevel != 0);
484 // Otherwise, find the chain and continue climbing.
485 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
486 if (N->getOperand(i).getValueType() == MVT::Other) {
487 N = N->getOperand(i).getNode();
488 goto found_chain_operand;
491 found_chain_operand:;
492 if (N->getOpcode() == ISD::EntryToken)
497 /// Call ReleasePred for each predecessor, then update register live def/gen.
498 /// Always update LiveRegDefs for a register dependence even if the current SU
499 /// also defines the register. This effectively create one large live range
500 /// across a sequence of two-address node. This is important because the
501 /// entire chain must be scheduled together. Example:
504 /// flags = (2) addc flags
505 /// flags = (1) addc flags
509 /// LiveRegDefs[flags] = 3
510 /// LiveRegGens[flags] = 1
512 /// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
513 /// interference on flags.
514 void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
515 // Bottom up: release predecessors
516 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
518 ReleasePred(SU, &*I);
519 if (I->isAssignedRegDep()) {
520 // This is a physical register dependency and it's impossible or
521 // expensive to copy the register. Make sure nothing that can
522 // clobber the register is scheduled between the predecessor and
524 SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
525 assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
526 "interference on register dependence");
527 LiveRegDefs[I->getReg()] = I->getSUnit();
528 if (!LiveRegGens[I->getReg()]) {
530 LiveRegGens[I->getReg()] = SU;
535 // If we're scheduling a lowered CALLSEQ_END, find the corresponding
536 // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
537 // these nodes, to prevent other calls from being interscheduled with them.
538 unsigned CallResource = TRI->getNumRegs();
539 if (!LiveRegDefs[CallResource])
540 for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
541 if (Node->isMachineOpcode() &&
542 Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
543 unsigned NestLevel = 0;
544 unsigned MaxNest = 0;
545 SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);
547 SUnit *Def = &SUnits[N->getNodeId()];
549 LiveRegDefs[CallResource] = Def;
550 LiveRegGens[CallResource] = SU;
555 /// Check to see if any of the pending instructions are ready to issue. If
556 /// so, add them to the available queue.
557 void ScheduleDAGRRList::ReleasePending() {
558 if (DisableSchedCycles) {
559 assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
563 // If the available queue is empty, it is safe to reset MinAvailableCycle.
564 if (AvailableQueue->empty())
565 MinAvailableCycle = UINT_MAX;
567 // Check to see if any of the pending instructions are ready to issue. If
568 // so, add them to the available queue.
569 for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
570 unsigned ReadyCycle = PendingQueue[i]->getHeight();
571 if (ReadyCycle < MinAvailableCycle)
572 MinAvailableCycle = ReadyCycle;
574 if (PendingQueue[i]->isAvailable) {
575 if (!isReady(PendingQueue[i]))
577 AvailableQueue->push(PendingQueue[i]);
579 PendingQueue[i]->isPending = false;
580 PendingQueue[i] = PendingQueue.back();
581 PendingQueue.pop_back();
586 /// Move the scheduler state forward by the specified number of Cycles.
587 void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
588 if (NextCycle <= CurCycle)
592 AvailableQueue->setCurCycle(NextCycle);
593 if (!HazardRec->isEnabled()) {
594 // Bypass lots of virtual calls in case of long latency.
595 CurCycle = NextCycle;
598 for (; CurCycle != NextCycle; ++CurCycle) {
599 HazardRec->RecedeCycle();
602 // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
603 // available Q to release pending nodes at least once before popping.
607 /// Move the scheduler state forward until the specified node's dependents are
608 /// ready and can be scheduled with no resource conflicts.
609 void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
610 if (DisableSchedCycles)
613 // FIXME: Nodes such as CopyFromReg probably should not advance the current
614 // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
615 // has predecessors the cycle will be advanced when they are scheduled.
616 // But given the crude nature of modeling latency though such nodes, we
617 // currently need to treat these nodes like real instructions.
618 // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;
620 unsigned ReadyCycle = SU->getHeight();
622 // Bump CurCycle to account for latency. We assume the latency of other
623 // available instructions may be hidden by the stall (not a full pipe stall).
624 // This updates the hazard recognizer's cycle before reserving resources for
626 AdvanceToCycle(ReadyCycle);
628 // Calls are scheduled in their preceding cycle, so don't conflict with
629 // hazards from instructions after the call. EmitNode will reset the
630 // scoreboard state before emitting the call.
634 // FIXME: For resource conflicts in very long non-pipelined stages, we
635 // should probably skip ahead here to avoid useless scoreboard checks.
638 ScheduleHazardRecognizer::HazardType HT =
639 HazardRec->getHazardType(SU, -Stalls);
641 if (HT == ScheduleHazardRecognizer::NoHazard)
646 AdvanceToCycle(CurCycle + Stalls);
649 /// Record this SUnit in the HazardRecognizer.
650 /// Does not update CurCycle.
651 void ScheduleDAGRRList::EmitNode(SUnit *SU) {
652 if (!HazardRec->isEnabled())
655 // Check for phys reg copy.
659 switch (SU->getNode()->getOpcode()) {
661 assert(SU->getNode()->isMachineOpcode() &&
662 "This target-independent node should not be scheduled.");
664 case ISD::MERGE_VALUES:
665 case ISD::TokenFactor:
667 case ISD::CopyFromReg:
669 // Noops don't affect the scoreboard state. Copies are likely to be
673 // For inline asm, clear the pipeline state.
678 // Calls are scheduled with their preceding instructions. For bottom-up
679 // scheduling, clear the pipeline state before emitting.
683 HazardRec->EmitInstruction(SU);
686 static void resetVRegCycle(SUnit *SU);
688 /// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
689 /// count of its predecessors. If a predecessor pending count is zero, add it to
690 /// the Available queue.
691 void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
692 DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
693 DEBUG(SU->dump(this));
696 if (CurCycle < SU->getHeight())
697 DEBUG(dbgs() << " Height [" << SU->getHeight()
698 << "] pipeline stall!\n");
701 // FIXME: Do not modify node height. It may interfere with
702 // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
703 // node its ready cycle can aid heuristics, and after scheduling it can
704 // indicate the scheduled cycle.
705 SU->setHeightToAtLeast(CurCycle);
707 // Reserve resources for the scheduled intruction.
710 Sequence.push_back(SU);
712 AvailableQueue->ScheduledNode(SU);
714 // If HazardRec is disabled, and each inst counts as one cycle, then
715 // advance CurCycle before ReleasePredecessors to avoid useless pushes to
716 // PendingQueue for schedulers that implement HasReadyFilter.
717 if (!HazardRec->isEnabled() && AvgIPC < 2)
718 AdvanceToCycle(CurCycle + 1);
720 // Update liveness of predecessors before successors to avoid treating a
721 // two-address node as a live range def.
722 ReleasePredecessors(SU);
724 // Release all the implicit physical register defs that are live.
725 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
727 // LiveRegDegs[I->getReg()] != SU when SU is a two-address node.
728 if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
729 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
731 LiveRegDefs[I->getReg()] = NULL;
732 LiveRegGens[I->getReg()] = NULL;
735 // Release the special call resource dependence, if this is the beginning
737 unsigned CallResource = TRI->getNumRegs();
738 if (LiveRegDefs[CallResource] == SU)
739 for (const SDNode *SUNode = SU->getNode(); SUNode;
740 SUNode = SUNode->getGluedNode()) {
741 if (SUNode->isMachineOpcode() &&
742 SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
743 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
745 LiveRegDefs[CallResource] = NULL;
746 LiveRegGens[CallResource] = NULL;
752 SU->isScheduled = true;
754 // Conditions under which the scheduler should eagerly advance the cycle:
755 // (1) No available instructions
756 // (2) All pipelines full, so available instructions must have hazards.
758 // If HazardRec is disabled, the cycle was pre-advanced before calling
759 // ReleasePredecessors. In that case, IssueCount should remain 0.
761 // Check AvailableQueue after ReleasePredecessors in case of zero latency.
762 if (HazardRec->isEnabled() || AvgIPC > 1) {
763 if (SU->getNode() && SU->getNode()->isMachineOpcode())
765 if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
766 || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
767 AdvanceToCycle(CurCycle + 1);
771 /// CapturePred - This does the opposite of ReleasePred. Since SU is being
772 /// unscheduled, incrcease the succ left count of its predecessors. Remove
773 /// them from AvailableQueue if necessary.
774 void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
775 SUnit *PredSU = PredEdge->getSUnit();
776 if (PredSU->isAvailable) {
777 PredSU->isAvailable = false;
778 if (!PredSU->isPending)
779 AvailableQueue->remove(PredSU);
782 assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
783 ++PredSU->NumSuccsLeft;
786 /// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
787 /// its predecessor states to reflect the change.
788 void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
789 DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
790 DEBUG(SU->dump(this));
792 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
795 if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
796 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
797 assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
798 "Physical register dependency violated?");
800 LiveRegDefs[I->getReg()] = NULL;
801 LiveRegGens[I->getReg()] = NULL;
805 // Reclaim the special call resource dependence, if this is the beginning
807 unsigned CallResource = TRI->getNumRegs();
808 for (const SDNode *SUNode = SU->getNode(); SUNode;
809 SUNode = SUNode->getGluedNode()) {
810 if (SUNode->isMachineOpcode() &&
811 SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
813 LiveRegDefs[CallResource] = SU;
814 LiveRegGens[CallResource] = NULL;
818 // Release the special call resource dependence, if this is the end
820 if (LiveRegGens[CallResource] == SU)
821 for (const SDNode *SUNode = SU->getNode(); SUNode;
822 SUNode = SUNode->getGluedNode()) {
823 if (SUNode->isMachineOpcode() &&
824 SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
825 assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
827 LiveRegDefs[CallResource] = NULL;
828 LiveRegGens[CallResource] = NULL;
832 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
834 if (I->isAssignedRegDep()) {
835 // This becomes the nearest def. Note that an earlier def may still be
836 // pending if this is a two-address node.
837 LiveRegDefs[I->getReg()] = SU;
838 if (!LiveRegDefs[I->getReg()]) {
841 if (LiveRegGens[I->getReg()] == NULL ||
842 I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
843 LiveRegGens[I->getReg()] = I->getSUnit();
846 if (SU->getHeight() < MinAvailableCycle)
847 MinAvailableCycle = SU->getHeight();
849 SU->setHeightDirty();
850 SU->isScheduled = false;
851 SU->isAvailable = true;
852 if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
853 // Don't make available until backtracking is complete.
854 SU->isPending = true;
855 PendingQueue.push_back(SU);
858 AvailableQueue->push(SU);
860 AvailableQueue->UnscheduledNode(SU);
863 /// After backtracking, the hazard checker needs to be restored to a state
864 /// corresponding the the current cycle.
865 void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
868 unsigned LookAhead = std::min((unsigned)Sequence.size(),
869 HazardRec->getMaxLookAhead());
873 std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
874 unsigned HazardCycle = (*I)->getHeight();
875 for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
877 for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
878 HazardRec->RecedeCycle();
884 /// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified in
885 /// BTCycle in order to schedule a specific node.
886 void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
887 SUnit *OldSU = Sequence.back();
890 if (SU->isSucc(OldSU))
891 // Don't try to remove SU from AvailableQueue.
892 SU->isAvailable = false;
893 // FIXME: use ready cycle instead of height
894 CurCycle = OldSU->getHeight();
895 UnscheduleNodeBottomUp(OldSU);
896 AvailableQueue->setCurCycle(CurCycle);
899 OldSU = Sequence.back();
902 assert(!SU->isSucc(OldSU) && "Something is wrong!");
904 RestoreHazardCheckerBottomUp();
911 static bool isOperandOf(const SUnit *SU, SDNode *N) {
912 for (const SDNode *SUNode = SU->getNode(); SUNode;
913 SUNode = SUNode->getGluedNode()) {
914 if (SUNode->isOperandOf(N))
920 /// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
921 /// successors to the newly created node.
922 SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
923 SDNode *N = SU->getNode();
927 if (SU->getNode()->getGluedNode())
931 bool TryUnfold = false;
932 for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
933 EVT VT = N->getValueType(i);
936 else if (VT == MVT::Other)
939 for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
940 const SDValue &Op = N->getOperand(i);
941 EVT VT = Op.getNode()->getValueType(Op.getResNo());
947 SmallVector<SDNode*, 2> NewNodes;
948 if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
951 // unfolding an x86 DEC64m operation results in store, dec, load which
952 // can't be handled here so quit
953 if (NewNodes.size() == 3)
956 DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
957 assert(NewNodes.size() == 2 && "Expected a load folding node!");
960 SDNode *LoadNode = NewNodes[0];
961 unsigned NumVals = N->getNumValues();
962 unsigned OldNumVals = SU->getNode()->getNumValues();
963 for (unsigned i = 0; i != NumVals; ++i)
964 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
965 DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
966 SDValue(LoadNode, 1));
968 // LoadNode may already exist. This can happen when there is another
969 // load from the same location and producing the same type of value
970 // but it has different alignment or volatileness.
971 bool isNewLoad = true;
973 if (LoadNode->getNodeId() != -1) {
974 LoadSU = &SUnits[LoadNode->getNodeId()];
977 LoadSU = CreateNewSUnit(LoadNode);
978 LoadNode->setNodeId(LoadSU->NodeNum);
980 InitNumRegDefsLeft(LoadSU);
981 ComputeLatency(LoadSU);
984 SUnit *NewSU = CreateNewSUnit(N);
985 assert(N->getNodeId() == -1 && "Node already inserted!");
986 N->setNodeId(NewSU->NodeNum);
988 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
989 for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
990 if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
991 NewSU->isTwoAddress = true;
995 if (MCID.isCommutable())
996 NewSU->isCommutable = true;
998 InitNumRegDefsLeft(NewSU);
999 ComputeLatency(NewSU);
1001 // Record all the edges to and from the old SU, by category.
1002 SmallVector<SDep, 4> ChainPreds;
1003 SmallVector<SDep, 4> ChainSuccs;
1004 SmallVector<SDep, 4> LoadPreds;
1005 SmallVector<SDep, 4> NodePreds;
1006 SmallVector<SDep, 4> NodeSuccs;
1007 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1010 ChainPreds.push_back(*I);
1011 else if (isOperandOf(I->getSUnit(), LoadNode))
1012 LoadPreds.push_back(*I);
1014 NodePreds.push_back(*I);
1016 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1019 ChainSuccs.push_back(*I);
1021 NodeSuccs.push_back(*I);
1024 // Now assign edges to the newly-created nodes.
1025 for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
1026 const SDep &Pred = ChainPreds[i];
1027 RemovePred(SU, Pred);
1029 AddPred(LoadSU, Pred);
1031 for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
1032 const SDep &Pred = LoadPreds[i];
1033 RemovePred(SU, Pred);
1035 AddPred(LoadSU, Pred);
1037 for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
1038 const SDep &Pred = NodePreds[i];
1039 RemovePred(SU, Pred);
1040 AddPred(NewSU, Pred);
1042 for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
1043 SDep D = NodeSuccs[i];
1044 SUnit *SuccDep = D.getSUnit();
1046 RemovePred(SuccDep, D);
1048 AddPred(SuccDep, D);
1049 // Balance register pressure.
1050 if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
1051 && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
1052 --NewSU->NumRegDefsLeft;
1054 for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
1055 SDep D = ChainSuccs[i];
1056 SUnit *SuccDep = D.getSUnit();
1058 RemovePred(SuccDep, D);
1061 AddPred(SuccDep, D);
1065 // Add a data dependency to reflect that NewSU reads the value defined
1067 AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));
1070 AvailableQueue->addNode(LoadSU);
1071 AvailableQueue->addNode(NewSU);
1075 if (NewSU->NumSuccsLeft == 0) {
1076 NewSU->isAvailable = true;
1082 DEBUG(dbgs() << " Duplicating SU #" << SU->NodeNum << "\n");
1083 NewSU = CreateClone(SU);
1085 // New SUnit has the exact same predecessors.
1086 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1088 if (!I->isArtificial())
1091 // Only copy scheduled successors. Cut them from old node's successor
1092 // list and move them over.
1093 SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
1094 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1096 if (I->isArtificial())
1098 SUnit *SuccSU = I->getSUnit();
1099 if (SuccSU->isScheduled) {
1104 DelDeps.push_back(std::make_pair(SuccSU, D));
1107 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
1108 RemovePred(DelDeps[i].first, DelDeps[i].second);
1110 AvailableQueue->updateNode(SU);
1111 AvailableQueue->addNode(NewSU);
1117 /// InsertCopiesAndMoveSuccs - Insert register copies and move all
1118 /// scheduled successors of the given SUnit to the last copy.
1119 void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
1120 const TargetRegisterClass *DestRC,
1121 const TargetRegisterClass *SrcRC,
1122 SmallVector<SUnit*, 2> &Copies) {
1123 SUnit *CopyFromSU = CreateNewSUnit(NULL);
1124 CopyFromSU->CopySrcRC = SrcRC;
1125 CopyFromSU->CopyDstRC = DestRC;
1127 SUnit *CopyToSU = CreateNewSUnit(NULL);
1128 CopyToSU->CopySrcRC = DestRC;
1129 CopyToSU->CopyDstRC = SrcRC;
1131 // Only copy scheduled successors. Cut them from old node's successor
1132 // list and move them over.
1133 SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
1134 for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
1136 if (I->isArtificial())
1138 SUnit *SuccSU = I->getSUnit();
1139 if (SuccSU->isScheduled) {
1141 D.setSUnit(CopyToSU);
1143 DelDeps.push_back(std::make_pair(SuccSU, *I));
1146 // Avoid scheduling the def-side copy before other successors. Otherwise
1147 // we could introduce another physreg interference on the copy and
1148 // continue inserting copies indefinitely.
1149 SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
1150 /*Reg=*/0, /*isNormalMemory=*/false,
1151 /*isMustAlias=*/false, /*isArtificial=*/true);
1155 for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
1156 RemovePred(DelDeps[i].first, DelDeps[i].second);
1158 AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
1159 AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));
1161 AvailableQueue->updateNode(SU);
1162 AvailableQueue->addNode(CopyFromSU);
1163 AvailableQueue->addNode(CopyToSU);
1164 Copies.push_back(CopyFromSU);
1165 Copies.push_back(CopyToSU);
1170 /// getPhysicalRegisterVT - Returns the ValueType of the physical register
1171 /// definition of the specified node.
1172 /// FIXME: Move to SelectionDAG?
1173 static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
1174 const TargetInstrInfo *TII) {
1175 const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
1176 assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
1177 unsigned NumRes = MCID.getNumDefs();
1178 for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
1183 return N->getValueType(NumRes);
1186 /// CheckForLiveRegDef - Return true and update live register vector if the
1187 /// specified register def of the specified SUnit clobbers any "live" registers.
1188 static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
1189 std::vector<SUnit*> &LiveRegDefs,
1190 SmallSet<unsigned, 4> &RegAdded,
1191 SmallVector<unsigned, 4> &LRegs,
1192 const TargetRegisterInfo *TRI) {
1193 for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {
1195 // Check if Ref is live.
1196 if (!LiveRegDefs[*AliasI]) continue;
1198 // Allow multiple uses of the same def.
1199 if (LiveRegDefs[*AliasI] == SU) continue;
1201 // Add Reg to the set of interfering live regs.
1202 if (RegAdded.insert(*AliasI)) {
1203 LRegs.push_back(*AliasI);
1208 /// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
1209 /// scheduling of the given node to satisfy live physical register dependencies.
1210 /// If the specific node is the last one that's available to schedule, do
1211 /// whatever is necessary (i.e. backtracking or cloning) to make it possible.
1212 bool ScheduleDAGRRList::
1213 DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
1214 if (NumLiveRegs == 0)
1217 SmallSet<unsigned, 4> RegAdded;
1218 // If this node would clobber any "live" register, then it's not ready.
1220 // If SU is the currently live definition of the same register that it uses,
1221 // then we are free to schedule it.
1222 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1224 if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
1225 CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
1226 RegAdded, LRegs, TRI);
1229 for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
1230 if (Node->getOpcode() == ISD::INLINEASM) {
1231 // Inline asm can clobber physical defs.
1232 unsigned NumOps = Node->getNumOperands();
1233 if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
1234 --NumOps; // Ignore the glue operand.
1236 for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
1238 cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
1239 unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);
1241 ++i; // Skip the ID value.
1242 if (InlineAsm::isRegDefKind(Flags) ||
1243 InlineAsm::isRegDefEarlyClobberKind(Flags) ||
1244 InlineAsm::isClobberKind(Flags)) {
1245 // Check for def of register or earlyclobber register.
1246 for (; NumVals; --NumVals, ++i) {
1247 unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
1248 if (TargetRegisterInfo::isPhysicalRegister(Reg))
1249 CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
1257 if (!Node->isMachineOpcode())
1259 // If we're in the middle of scheduling a call, don't begin scheduling
1260 // another call. Also, don't allow any physical registers to be live across
1262 if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
1263 // Check the special calling-sequence resource.
1264 unsigned CallResource = TRI->getNumRegs();
1265 if (LiveRegDefs[CallResource]) {
1266 SDNode *Gen = LiveRegGens[CallResource]->getNode();
1267 while (SDNode *Glued = Gen->getGluedNode())
1269 if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
1270 LRegs.push_back(CallResource);
1273 const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
1274 if (!MCID.ImplicitDefs)
1276 for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg)
1277 CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
1280 return !LRegs.empty();
1283 /// Return a node that can be scheduled in this cycle. Requirements:
1284 /// (1) Ready: latency has been satisfied
1285 /// (2) No Hazards: resources are available
1286 /// (3) No Interferences: may unschedule to break register interferences.
1287 SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
1288 SmallVector<SUnit*, 4> Interferences;
1289 DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;
1291 SUnit *CurSU = AvailableQueue->pop();
1293 SmallVector<unsigned, 4> LRegs;
1294 if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
1296 LRegsMap.insert(std::make_pair(CurSU, LRegs));
1298 CurSU->isPending = true; // This SU is not in AvailableQueue right now.
1299 Interferences.push_back(CurSU);
1300 CurSU = AvailableQueue->pop();
1303 // Add the nodes that aren't ready back onto the available list.
1304 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1305 Interferences[i]->isPending = false;
1306 assert(Interferences[i]->isAvailable && "must still be available");
1307 AvailableQueue->push(Interferences[i]);
1312 // All candidates are delayed due to live physical reg dependencies.
1313 // Try backtracking, code duplication, or inserting cross class copies
1315 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1316 SUnit *TrySU = Interferences[i];
1317 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1319 // Try unscheduling up to the point where it's safe to schedule
1322 unsigned LiveCycle = UINT_MAX;
1323 for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
1324 unsigned Reg = LRegs[j];
1325 if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
1326 BtSU = LiveRegGens[Reg];
1327 LiveCycle = BtSU->getHeight();
1330 if (!WillCreateCycle(TrySU, BtSU)) {
1331 BacktrackBottomUp(TrySU, BtSU);
1333 // Force the current node to be scheduled before the node that
1334 // requires the physical reg dep.
1335 if (BtSU->isAvailable) {
1336 BtSU->isAvailable = false;
1337 if (!BtSU->isPending)
1338 AvailableQueue->remove(BtSU);
1340 AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
1341 /*Reg=*/0, /*isNormalMemory=*/false,
1342 /*isMustAlias=*/false, /*isArtificial=*/true));
1344 // If one or more successors has been unscheduled, then the current
1345 // node is no longer avaialable. Schedule a successor that's now
1346 // available instead.
1347 if (!TrySU->isAvailable) {
1348 CurSU = AvailableQueue->pop();
1352 TrySU->isPending = false;
1353 Interferences.erase(Interferences.begin()+i);
1360 // Can't backtrack. If it's too expensive to copy the value, then try
1361 // duplicate the nodes that produces these "too expensive to copy"
1362 // values to break the dependency. In case even that doesn't work,
1363 // insert cross class copies.
1364 // If it's not too expensive, i.e. cost != -1, issue copies.
1365 SUnit *TrySU = Interferences[0];
1366 SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
1367 assert(LRegs.size() == 1 && "Can't handle this yet!");
1368 unsigned Reg = LRegs[0];
1369 SUnit *LRDef = LiveRegDefs[Reg];
1370 EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
1371 const TargetRegisterClass *RC =
1372 TRI->getMinimalPhysRegClass(Reg, VT);
1373 const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);
1375 // If cross copy register class is the same as RC, then it must be possible
1376 // copy the value directly. Do not try duplicate the def.
1377 // If cross copy register class is not the same as RC, then it's possible to
1378 // copy the value but it require cross register class copies and it is
1380 // If cross copy register class is null, then it's not possible to copy
1381 // the value at all.
1384 NewDef = CopyAndMoveSuccessors(LRDef);
1385 if (!DestRC && !NewDef)
1386 report_fatal_error("Can't handle live physical register dependency!");
1389 // Issue copies, these can be expensive cross register class copies.
1390 SmallVector<SUnit*, 2> Copies;
1391 InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
1392 DEBUG(dbgs() << " Adding an edge from SU #" << TrySU->NodeNum
1393 << " to SU #" << Copies.front()->NodeNum << "\n");
1394 AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
1395 /*Reg=*/0, /*isNormalMemory=*/false,
1396 /*isMustAlias=*/false,
1397 /*isArtificial=*/true));
1398 NewDef = Copies.back();
1401 DEBUG(dbgs() << " Adding an edge from SU #" << NewDef->NodeNum
1402 << " to SU #" << TrySU->NodeNum << "\n");
1403 LiveRegDefs[Reg] = NewDef;
1404 AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
1405 /*Reg=*/0, /*isNormalMemory=*/false,
1406 /*isMustAlias=*/false,
1407 /*isArtificial=*/true));
1408 TrySU->isAvailable = false;
1412 assert(CurSU && "Unable to resolve live physical register dependencies!");
1414 // Add the nodes that aren't ready back onto the available list.
1415 for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
1416 Interferences[i]->isPending = false;
1417 // May no longer be available due to backtracking.
1418 if (Interferences[i]->isAvailable) {
1419 AvailableQueue->push(Interferences[i]);
1425 /// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
1427 void ScheduleDAGRRList::ListScheduleBottomUp() {
1428 // Release any predecessors of the special Exit node.
1429 ReleasePredecessors(&ExitSU);
1431 // Add root to Available queue.
1432 if (!SUnits.empty()) {
1433 SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
1434 assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
1435 RootSU->isAvailable = true;
1436 AvailableQueue->push(RootSU);
1439 // While Available queue is not empty, grab the node with the highest
1440 // priority. If it is not ready put it back. Schedule the node.
1441 Sequence.reserve(SUnits.size());
1442 while (!AvailableQueue->empty()) {
1443 DEBUG(dbgs() << "\nExamining Available:\n";
1444 AvailableQueue->dump(this));
1446 // Pick the best node to schedule taking all constraints into
1448 SUnit *SU = PickNodeToScheduleBottomUp();
1450 AdvancePastStalls(SU);
1452 ScheduleNodeBottomUp(SU);
1454 while (AvailableQueue->empty() && !PendingQueue.empty()) {
1455 // Advance the cycle to free resources. Skip ahead to the next ready SU.
1456 assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
1457 AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
1461 // Reverse the order if it is bottom up.
1462 std::reverse(Sequence.begin(), Sequence.end());
1465 VerifySchedule(/*isBottomUp=*/true);
1469 //===----------------------------------------------------------------------===//
1470 // RegReductionPriorityQueue Definition
1471 //===----------------------------------------------------------------------===//
1473 // This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
1474 // to reduce register pressure.
1477 class RegReductionPQBase;
1479 struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
1480 bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
1485 struct reverse_sort : public queue_sort {
1487 reverse_sort(SF &sf) : SortFunc(sf) {}
1488 reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}
1490 bool operator()(SUnit* left, SUnit* right) const {
1491 // reverse left/right rather than simply !SortFunc(left, right)
1492 // to expose different paths in the comparison logic.
1493 return SortFunc(right, left);
1498 /// bu_ls_rr_sort - Priority function for bottom up register pressure
1499 // reduction scheduler.
1500 struct bu_ls_rr_sort : public queue_sort {
1503 HasReadyFilter = false
1506 RegReductionPQBase *SPQ;
1507 bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
1508 bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}
1510 bool operator()(SUnit* left, SUnit* right) const;
1513 // src_ls_rr_sort - Priority function for source order scheduler.
1514 struct src_ls_rr_sort : public queue_sort {
1517 HasReadyFilter = false
1520 RegReductionPQBase *SPQ;
1521 src_ls_rr_sort(RegReductionPQBase *spq)
1523 src_ls_rr_sort(const src_ls_rr_sort &RHS)
1526 bool operator()(SUnit* left, SUnit* right) const;
1529 // hybrid_ls_rr_sort - Priority function for hybrid scheduler.
1530 struct hybrid_ls_rr_sort : public queue_sort {
1533 HasReadyFilter = false
1536 RegReductionPQBase *SPQ;
1537 hybrid_ls_rr_sort(RegReductionPQBase *spq)
1539 hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
1542 bool isReady(SUnit *SU, unsigned CurCycle) const;
1544 bool operator()(SUnit* left, SUnit* right) const;
1547 // ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
1549 struct ilp_ls_rr_sort : public queue_sort {
1552 HasReadyFilter = false
1555 RegReductionPQBase *SPQ;
1556 ilp_ls_rr_sort(RegReductionPQBase *spq)
1558 ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
1561 bool isReady(SUnit *SU, unsigned CurCycle) const;
1563 bool operator()(SUnit* left, SUnit* right) const;
1566 class RegReductionPQBase : public SchedulingPriorityQueue {
1568 std::vector<SUnit*> Queue;
1569 unsigned CurQueueId;
1570 bool TracksRegPressure;
1572 // SUnits - The SUnits for the current graph.
1573 std::vector<SUnit> *SUnits;
1575 MachineFunction &MF;
1576 const TargetInstrInfo *TII;
1577 const TargetRegisterInfo *TRI;
1578 const TargetLowering *TLI;
1579 ScheduleDAGRRList *scheduleDAG;
1581 // SethiUllmanNumbers - The SethiUllman number for each node.
1582 std::vector<unsigned> SethiUllmanNumbers;
1584 /// RegPressure - Tracking current reg pressure per register class.
1586 std::vector<unsigned> RegPressure;
1588 /// RegLimit - Tracking the number of allocatable registers per register
1590 std::vector<unsigned> RegLimit;
1593 RegReductionPQBase(MachineFunction &mf,
1594 bool hasReadyFilter,
1596 const TargetInstrInfo *tii,
1597 const TargetRegisterInfo *tri,
1598 const TargetLowering *tli)
1599 : SchedulingPriorityQueue(hasReadyFilter),
1600 CurQueueId(0), TracksRegPressure(tracksrp),
1601 MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
1602 if (TracksRegPressure) {
1603 unsigned NumRC = TRI->getNumRegClasses();
1604 RegLimit.resize(NumRC);
1605 RegPressure.resize(NumRC);
1606 std::fill(RegLimit.begin(), RegLimit.end(), 0);
1607 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1608 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1609 E = TRI->regclass_end(); I != E; ++I)
1610 RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
1614 void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
1615 scheduleDAG = scheduleDag;
1618 ScheduleHazardRecognizer* getHazardRec() {
1619 return scheduleDAG->getHazardRec();
1622 void initNodes(std::vector<SUnit> &sunits);
1624 void addNode(const SUnit *SU);
1626 void updateNode(const SUnit *SU);
1628 void releaseState() {
1630 SethiUllmanNumbers.clear();
1631 std::fill(RegPressure.begin(), RegPressure.end(), 0);
1634 unsigned getNodePriority(const SUnit *SU) const;
1636 unsigned getNodeOrdering(const SUnit *SU) const {
1637 if (!SU->getNode()) return 0;
1639 return scheduleDAG->DAG->GetOrdering(SU->getNode());
1642 bool empty() const { return Queue.empty(); }
1644 void push(SUnit *U) {
1645 assert(!U->NodeQueueId && "Node in the queue already");
1646 U->NodeQueueId = ++CurQueueId;
1650 void remove(SUnit *SU) {
1651 assert(!Queue.empty() && "Queue is empty!");
1652 assert(SU->NodeQueueId != 0 && "Not in queue!");
1653 std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
1655 if (I != prior(Queue.end()))
1656 std::swap(*I, Queue.back());
1658 SU->NodeQueueId = 0;
1661 bool tracksRegPressure() const { return TracksRegPressure; }
1663 void dumpRegPressure() const;
1665 bool HighRegPressure(const SUnit *SU) const;
1667 bool MayReduceRegPressure(SUnit *SU) const;
1669 int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;
1671 void ScheduledNode(SUnit *SU);
1673 void UnscheduledNode(SUnit *SU);
1676 bool canClobber(const SUnit *SU, const SUnit *Op);
1677 void AddPseudoTwoAddrDeps();
1678 void PrescheduleNodesWithMultipleUses();
1679 void CalculateSethiUllmanNumbers();
1683 static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
1684 std::vector<SUnit *>::iterator Best = Q.begin();
1685 for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
1686 E = Q.end(); I != E; ++I)
1687 if (Picker(*Best, *I))
1690 if (Best != prior(Q.end()))
1691 std::swap(*Best, Q.back());
1697 SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
1699 if (DAG->StressSched) {
1700 reverse_sort<SF> RPicker(Picker);
1701 return popFromQueueImpl(Q, RPicker);
1705 return popFromQueueImpl(Q, Picker);
1709 class RegReductionPriorityQueue : public RegReductionPQBase {
1713 RegReductionPriorityQueue(MachineFunction &mf,
1715 const TargetInstrInfo *tii,
1716 const TargetRegisterInfo *tri,
1717 const TargetLowering *tli)
1718 : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
1721 bool isBottomUp() const { return SF::IsBottomUp; }
1723 bool isReady(SUnit *U) const {
1724 return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
1728 if (Queue.empty()) return NULL;
1730 SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
1735 void dump(ScheduleDAG *DAG) const {
1736 // Emulate pop() without clobbering NodeQueueIds.
1737 std::vector<SUnit*> DumpQueue = Queue;
1738 SF DumpPicker = Picker;
1739 while (!DumpQueue.empty()) {
1740 SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
1741 dbgs() << "Height " << SU->getHeight() << ": ";
1747 typedef RegReductionPriorityQueue<bu_ls_rr_sort>
1748 BURegReductionPriorityQueue;
1750 typedef RegReductionPriorityQueue<src_ls_rr_sort>
1751 SrcRegReductionPriorityQueue;
1753 typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
1754 HybridBURRPriorityQueue;
1756 typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
1757 ILPBURRPriorityQueue;
1758 } // end anonymous namespace
1760 //===----------------------------------------------------------------------===//
1761 // Static Node Priority for Register Pressure Reduction
1762 //===----------------------------------------------------------------------===//
1764 // Check for special nodes that bypass scheduling heuristics.
1765 // Currently this pushes TokenFactor nodes down, but may be used for other
1766 // pseudo-ops as well.
1768 // Return -1 to schedule right above left, 1 for left above right.
1769 // Return 0 if no bias exists.
1770 static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
1771 bool LSchedLow = left->isScheduleLow;
1772 bool RSchedLow = right->isScheduleLow;
1773 if (LSchedLow != RSchedLow)
1774 return LSchedLow < RSchedLow ? 1 : -1;
1778 /// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
1779 /// Smaller number is the higher priority.
1781 CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1782 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1783 if (SethiUllmanNumber != 0)
1784 return SethiUllmanNumber;
1787 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1789 if (I->isCtrl()) continue; // ignore chain preds
1790 SUnit *PredSU = I->getSUnit();
1791 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
1792 if (PredSethiUllman > SethiUllmanNumber) {
1793 SethiUllmanNumber = PredSethiUllman;
1795 } else if (PredSethiUllman == SethiUllmanNumber)
1799 SethiUllmanNumber += Extra;
1801 if (SethiUllmanNumber == 0)
1802 SethiUllmanNumber = 1;
1804 return SethiUllmanNumber;
1807 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1808 /// scheduling units.
1809 void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1810 SethiUllmanNumbers.assign(SUnits->size(), 0);
1812 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1813 CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1816 void RegReductionPQBase::addNode(const SUnit *SU) {
1817 unsigned SUSize = SethiUllmanNumbers.size();
1818 if (SUnits->size() > SUSize)
1819 SethiUllmanNumbers.resize(SUSize*2, 0);
1820 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1823 void RegReductionPQBase::updateNode(const SUnit *SU) {
1824 SethiUllmanNumbers[SU->NodeNum] = 0;
1825 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1828 // Lower priority means schedule further down. For bottom-up scheduling, lower
1829 // priority SUs are scheduled before higher priority SUs.
1830 unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1831 assert(SU->NodeNum < SethiUllmanNumbers.size());
1832 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1833 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1834 // CopyToReg should be close to its uses to facilitate coalescing and
1837 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1838 Opc == TargetOpcode::SUBREG_TO_REG ||
1839 Opc == TargetOpcode::INSERT_SUBREG)
1840 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1841 // close to their uses to facilitate coalescing.
1843 if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1844 // If SU does not have a register use, i.e. it doesn't produce a value
1845 // that would be consumed (e.g. store), then it terminates a chain of
1846 // computation. Give it a large SethiUllman number so it will be
1847 // scheduled right before its predecessors that it doesn't lengthen
1848 // their live ranges.
1850 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1851 // If SU does not have a register def, schedule it close to its uses
1852 // because it does not lengthen any live ranges.
1855 return SethiUllmanNumbers[SU->NodeNum];
1857 unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
1859 // FIXME: This assumes all of the defs are used as call operands.
1860 int NP = (int)Priority - SU->getNode()->getNumValues();
1861 return (NP > 0) ? NP : 0;
1867 //===----------------------------------------------------------------------===//
1868 // Register Pressure Tracking
1869 //===----------------------------------------------------------------------===//
1871 void RegReductionPQBase::dumpRegPressure() const {
1872 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1873 E = TRI->regclass_end(); I != E; ++I) {
1874 const TargetRegisterClass *RC = *I;
1875 unsigned Id = RC->getID();
1876 unsigned RP = RegPressure[Id];
1878 DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
1883 bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1887 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1891 SUnit *PredSU = I->getSUnit();
1892 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1893 // to cover the number of registers defined (they are all live).
1894 if (PredSU->NumRegDefsLeft == 0) {
1897 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1898 RegDefPos.IsValid(); RegDefPos.Advance()) {
1899 unsigned RCId, Cost;
1900 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
1902 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1909 bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1910 const SDNode *N = SU->getNode();
1912 if (!N->isMachineOpcode() || !SU->NumSuccs)
1915 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1916 for (unsigned i = 0; i != NumDefs; ++i) {
1917 EVT VT = N->getValueType(i);
1918 if (!N->hasAnyUseOfValue(i))
1920 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1921 if (RegPressure[RCId] >= RegLimit[RCId])
1927 // Compute the register pressure contribution by this instruction by count up
1928 // for uses that are not live and down for defs. Only count register classes
1929 // that are already under high pressure. As a side effect, compute the number of
1930 // uses of registers that are already live.
1932 // FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1933 // so could probably be factored.
1934 int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1937 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1941 SUnit *PredSU = I->getSUnit();
1942 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1943 // to cover the number of registers defined (they are all live).
1944 if (PredSU->NumRegDefsLeft == 0) {
1945 if (PredSU->getNode()->isMachineOpcode())
1949 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1950 RegDefPos.IsValid(); RegDefPos.Advance()) {
1951 EVT VT = RegDefPos.GetValue();
1952 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1953 if (RegPressure[RCId] >= RegLimit[RCId])
1957 const SDNode *N = SU->getNode();
1959 if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
1962 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1963 for (unsigned i = 0; i != NumDefs; ++i) {
1964 EVT VT = N->getValueType(i);
1965 if (!N->hasAnyUseOfValue(i))
1967 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1968 if (RegPressure[RCId] >= RegLimit[RCId])
1974 void RegReductionPQBase::ScheduledNode(SUnit *SU) {
1975 if (!TracksRegPressure)
1981 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1985 SUnit *PredSU = I->getSUnit();
1986 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1987 // to cover the number of registers defined (they are all live).
1988 if (PredSU->NumRegDefsLeft == 0) {
1991 // FIXME: The ScheduleDAG currently loses information about which of a
1992 // node's values is consumed by each dependence. Consequently, if the node
1993 // defines multiple register classes, we don't know which to pressurize
1994 // here. Instead the following loop consumes the register defs in an
1995 // arbitrary order. At least it handles the common case of clustered loads
1996 // to the same class. For precise liveness, each SDep needs to indicate the
1997 // result number. But that tightly couples the ScheduleDAG with the
1998 // SelectionDAG making updates tricky. A simpler hack would be to attach a
1999 // value type or register class to SDep.
2001 // The most important aspect of register tracking is balancing the increase
2002 // here with the reduction further below. Note that this SU may use multiple
2003 // defs in PredSU. The can't be determined here, but we've already
2004 // compensated by reducing NumRegDefsLeft in PredSU during
2005 // ScheduleDAGSDNodes::AddSchedEdges.
2006 --PredSU->NumRegDefsLeft;
2007 unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
2008 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2009 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2013 unsigned RCId, Cost;
2014 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
2015 RegPressure[RCId] += Cost;
2020 // We should have this assert, but there may be dead SDNodes that never
2021 // materialize as SUnits, so they don't appear to generate liveness.
2022 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
2023 int SkipRegDefs = (int)SU->NumRegDefsLeft;
2024 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
2025 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2026 if (SkipRegDefs > 0)
2028 unsigned RCId, Cost;
2029 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
2030 if (RegPressure[RCId] < Cost) {
2031 // Register pressure tracking is imprecise. This can happen. But we try
2032 // hard not to let it happen because it likely results in poor scheduling.
2033 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
2034 RegPressure[RCId] = 0;
2037 RegPressure[RCId] -= Cost;
2043 void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
2044 if (!TracksRegPressure)
2047 const SDNode *N = SU->getNode();
2050 if (!N->isMachineOpcode()) {
2051 if (N->getOpcode() != ISD::CopyToReg)
2054 unsigned Opc = N->getMachineOpcode();
2055 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2056 Opc == TargetOpcode::INSERT_SUBREG ||
2057 Opc == TargetOpcode::SUBREG_TO_REG ||
2058 Opc == TargetOpcode::REG_SEQUENCE ||
2059 Opc == TargetOpcode::IMPLICIT_DEF)
2063 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2067 SUnit *PredSU = I->getSUnit();
2068 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2069 // counts data deps.
2070 if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2072 const SDNode *PN = PredSU->getNode();
2073 if (!PN->isMachineOpcode()) {
2074 if (PN->getOpcode() == ISD::CopyFromReg) {
2075 EVT VT = PN->getValueType(0);
2076 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2077 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2081 unsigned POpc = PN->getMachineOpcode();
2082 if (POpc == TargetOpcode::IMPLICIT_DEF)
2084 if (POpc == TargetOpcode::EXTRACT_SUBREG ||
2085 POpc == TargetOpcode::INSERT_SUBREG ||
2086 POpc == TargetOpcode::SUBREG_TO_REG) {
2087 EVT VT = PN->getValueType(0);
2088 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2089 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2092 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2093 for (unsigned i = 0; i != NumDefs; ++i) {
2094 EVT VT = PN->getValueType(i);
2095 if (!PN->hasAnyUseOfValue(i))
2097 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2098 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2099 // Register pressure tracking is imprecise. This can happen.
2100 RegPressure[RCId] = 0;
2102 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2106 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2107 // may transfer data dependencies to CopyToReg.
2108 if (SU->NumSuccs && N->isMachineOpcode()) {
2109 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2110 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2111 EVT VT = N->getValueType(i);
2112 if (VT == MVT::Glue || VT == MVT::Other)
2114 if (!N->hasAnyUseOfValue(i))
2116 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2117 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2124 //===----------------------------------------------------------------------===//
2125 // Dynamic Node Priority for Register Pressure Reduction
2126 //===----------------------------------------------------------------------===//
2128 /// closestSucc - Returns the scheduled cycle of the successor which is
2129 /// closest to the current cycle.
2130 static unsigned closestSucc(const SUnit *SU) {
2131 unsigned MaxHeight = 0;
2132 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2134 if (I->isCtrl()) continue; // ignore chain succs
2135 unsigned Height = I->getSUnit()->getHeight();
2136 // If there are bunch of CopyToRegs stacked up, they should be considered
2137 // to be at the same position.
2138 if (I->getSUnit()->getNode() &&
2139 I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2140 Height = closestSucc(I->getSUnit())+1;
2141 if (Height > MaxHeight)
2147 /// calcMaxScratches - Returns an cost estimate of the worse case requirement
2148 /// for scratch registers, i.e. number of data dependencies.
2149 static unsigned calcMaxScratches(const SUnit *SU) {
2150 unsigned Scratches = 0;
2151 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2153 if (I->isCtrl()) continue; // ignore chain preds
2159 /// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2160 /// CopyFromReg from a virtual register.
2161 static bool hasOnlyLiveInOpers(const SUnit *SU) {
2162 bool RetVal = false;
2163 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2165 if (I->isCtrl()) continue;
2166 const SUnit *PredSU = I->getSUnit();
2167 if (PredSU->getNode() &&
2168 PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2170 cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2171 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2181 /// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2182 /// CopyToReg to a virtual register. This SU def is probably a liveout and
2183 /// it has no other use. It should be scheduled closer to the terminator.
2184 static bool hasOnlyLiveOutUses(const SUnit *SU) {
2185 bool RetVal = false;
2186 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2188 if (I->isCtrl()) continue;
2189 const SUnit *SuccSU = I->getSUnit();
2190 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2192 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2193 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2203 // Set isVRegCycle for a node with only live in opers and live out uses. Also
2204 // set isVRegCycle for its CopyFromReg operands.
2206 // This is only relevant for single-block loops, in which case the VRegCycle
2207 // node is likely an induction variable in which the operand and target virtual
2208 // registers should be coalesced (e.g. pre/post increment values). Setting the
2209 // isVRegCycle flag helps the scheduler prioritize other uses of the same
2210 // CopyFromReg so that this node becomes the virtual register "kill". This
2211 // avoids interference between the values live in and out of the block and
2212 // eliminates a copy inside the loop.
2213 static void initVRegCycle(SUnit *SU) {
2214 if (DisableSchedVRegCycle)
2217 if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2220 DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2222 SU->isVRegCycle = true;
2224 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2226 if (I->isCtrl()) continue;
2227 I->getSUnit()->isVRegCycle = true;
2231 // After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2232 // CopyFromReg operands. We should no longer penalize other uses of this VReg.
2233 static void resetVRegCycle(SUnit *SU) {
2234 if (!SU->isVRegCycle)
2237 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2239 if (I->isCtrl()) continue; // ignore chain preds
2240 SUnit *PredSU = I->getSUnit();
2241 if (PredSU->isVRegCycle) {
2242 assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2243 "VRegCycle def must be CopyFromReg");
2244 I->getSUnit()->isVRegCycle = 0;
2249 // Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2250 // means a node that defines the VRegCycle has not been scheduled yet.
2251 static bool hasVRegCycleUse(const SUnit *SU) {
2252 // If this SU also defines the VReg, don't hoist it as a "use".
2253 if (SU->isVRegCycle)
2256 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2258 if (I->isCtrl()) continue; // ignore chain preds
2259 if (I->getSUnit()->isVRegCycle &&
2260 I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2261 DEBUG(dbgs() << " VReg cycle use: SU (" << SU->NodeNum << ")\n");
2268 // Check for either a dependence (latency) or resource (hazard) stall.
2270 // Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2271 static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2272 if ((int)SPQ->getCurCycle() < Height) return true;
2273 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2274 != ScheduleHazardRecognizer::NoHazard)
2279 // Return -1 if left has higher priority, 1 if right has higher priority.
2280 // Return 0 if latency-based priority is equivalent.
2281 static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2282 RegReductionPQBase *SPQ) {
2283 // Scheduling an instruction that uses a VReg whose postincrement has not yet
2284 // been scheduled will induce a copy. Model this as an extra cycle of latency.
2285 int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2286 int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2287 int LHeight = (int)left->getHeight() + LPenalty;
2288 int RHeight = (int)right->getHeight() + RPenalty;
2290 bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
2291 BUHasStall(left, LHeight, SPQ);
2292 bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
2293 BUHasStall(right, RHeight, SPQ);
2295 // If scheduling one of the node will cause a pipeline stall, delay it.
2296 // If scheduling either one of the node will cause a pipeline stall, sort
2297 // them according to their height.
2300 DEBUG(++FactorCount[FactStall]);
2303 if (LHeight != RHeight) {
2304 DEBUG(++FactorCount[FactStall]);
2305 return LHeight > RHeight ? 1 : -1;
2307 } else if (RStall) {
2308 DEBUG(++FactorCount[FactStall]);
2312 // If either node is scheduling for latency, sort them by height/depth
2314 if (!checkPref || (left->SchedulingPref == Sched::ILP ||
2315 right->SchedulingPref == Sched::ILP)) {
2316 if (DisableSchedCycles) {
2317 if (LHeight != RHeight) {
2318 DEBUG(++FactorCount[FactHeight]);
2319 return LHeight > RHeight ? 1 : -1;
2323 // If neither instruction stalls (!LStall && !RStall) then
2324 // its height is already covered so only its depth matters. We also reach
2325 // this if both stall but have the same height.
2326 int LDepth = left->getDepth() - LPenalty;
2327 int RDepth = right->getDepth() - RPenalty;
2328 if (LDepth != RDepth) {
2329 DEBUG(++FactorCount[FactDepth]);
2330 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2331 << ") depth " << LDepth << " vs SU (" << right->NodeNum
2332 << ") depth " << RDepth << "\n");
2333 return LDepth < RDepth ? 1 : -1;
2336 if (left->Latency != right->Latency) {
2337 DEBUG(++FactorCount[FactOther]);
2338 return left->Latency > right->Latency ? 1 : -1;
2344 static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2345 // Schedule physical register definitions close to their use. This is
2346 // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2347 // long as shortening physreg live ranges is generally good, we can defer
2348 // creating a subtarget hook.
2349 if (!DisableSchedPhysRegJoin) {
2350 bool LHasPhysReg = left->hasPhysRegDefs;
2351 bool RHasPhysReg = right->hasPhysRegDefs;
2352 if (LHasPhysReg != RHasPhysReg) {
2353 DEBUG(++FactorCount[FactRegUses]);
2355 const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
2357 DEBUG(dbgs() << " SU (" << left->NodeNum << ") "
2358 << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2359 << PhysRegMsg[RHasPhysReg] << "\n");
2360 return LHasPhysReg < RHasPhysReg;
2364 // Prioritize by Sethi-Ulmann number and push CopyToReg nodes down.
2365 unsigned LPriority = SPQ->getNodePriority(left);
2366 unsigned RPriority = SPQ->getNodePriority(right);
2368 // Be really careful about hoisting call operands above previous calls.
2369 // Only allows it if it would reduce register pressure.
2370 if (left->isCall && right->isCallOp) {
2371 unsigned RNumVals = right->getNode()->getNumValues();
2372 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2374 if (right->isCall && left->isCallOp) {
2375 unsigned LNumVals = left->getNode()->getNumValues();
2376 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2379 if (LPriority != RPriority) {
2380 DEBUG(++FactorCount[FactStatic]);
2381 return LPriority > RPriority;
2384 // One or both of the nodes are calls and their sethi-ullman numbers are the
2385 // same, then keep source order.
2386 if (left->isCall || right->isCall) {
2387 unsigned LOrder = SPQ->getNodeOrdering(left);
2388 unsigned ROrder = SPQ->getNodeOrdering(right);
2390 // Prefer an ordering where the lower the non-zero order number, the higher
2392 if ((LOrder || ROrder) && LOrder != ROrder)
2393 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2396 // Try schedule def + use closer when Sethi-Ullman numbers are the same.
2401 // and the following instructions are both ready.
2405 // Then schedule t2 = op first.
2412 // This creates more short live intervals.
2413 unsigned LDist = closestSucc(left);
2414 unsigned RDist = closestSucc(right);
2415 if (LDist != RDist) {
2416 DEBUG(++FactorCount[FactOther]);
2417 return LDist < RDist;
2420 // How many registers becomes live when the node is scheduled.
2421 unsigned LScratch = calcMaxScratches(left);
2422 unsigned RScratch = calcMaxScratches(right);
2423 if (LScratch != RScratch) {
2424 DEBUG(++FactorCount[FactOther]);
2425 return LScratch > RScratch;
2428 // Comparing latency against a call makes little sense unless the node
2429 // is register pressure-neutral.
2430 if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2431 return (left->NodeQueueId > right->NodeQueueId);
2433 // Do not compare latencies when one or both of the nodes are calls.
2434 if (!DisableSchedCycles &&
2435 !(left->isCall || right->isCall)) {
2436 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2441 if (left->getHeight() != right->getHeight()) {
2442 DEBUG(++FactorCount[FactHeight]);
2443 return left->getHeight() > right->getHeight();
2446 if (left->getDepth() != right->getDepth()) {
2447 DEBUG(++FactorCount[FactDepth]);
2448 return left->getDepth() < right->getDepth();
2452 assert(left->NodeQueueId && right->NodeQueueId &&
2453 "NodeQueueId cannot be zero");
2454 DEBUG(++FactorCount[FactOther]);
2455 return (left->NodeQueueId > right->NodeQueueId);
2459 bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2460 if (int res = checkSpecialNodes(left, right))
2463 return BURRSort(left, right, SPQ);
2466 // Source order, otherwise bottom up.
2467 bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2468 if (int res = checkSpecialNodes(left, right))
2471 unsigned LOrder = SPQ->getNodeOrdering(left);
2472 unsigned ROrder = SPQ->getNodeOrdering(right);
2474 // Prefer an ordering where the lower the non-zero order number, the higher
2476 if ((LOrder || ROrder) && LOrder != ROrder)
2477 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2479 return BURRSort(left, right, SPQ);
2482 // If the time between now and when the instruction will be ready can cover
2483 // the spill code, then avoid adding it to the ready queue. This gives long
2484 // stalls highest priority and allows hoisting across calls. It should also
2485 // speed up processing the available queue.
2486 bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2487 static const unsigned ReadyDelay = 3;
2489 if (SPQ->MayReduceRegPressure(SU)) return true;
2491 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2493 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2494 != ScheduleHazardRecognizer::NoHazard)
2500 // Return true if right should be scheduled with higher priority than left.
2501 bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2502 if (int res = checkSpecialNodes(left, right))
2505 if (left->isCall || right->isCall)
2506 // No way to compute latency of calls.
2507 return BURRSort(left, right, SPQ);
2509 bool LHigh = SPQ->HighRegPressure(left);
2510 bool RHigh = SPQ->HighRegPressure(right);
2511 // Avoid causing spills. If register pressure is high, schedule for
2512 // register pressure reduction.
2513 if (LHigh && !RHigh) {
2514 DEBUG(++FactorCount[FactPressureDiff]);
2515 DEBUG(dbgs() << " pressure SU(" << left->NodeNum << ") > SU("
2516 << right->NodeNum << ")\n");
2519 else if (!LHigh && RHigh) {
2520 DEBUG(++FactorCount[FactPressureDiff]);
2521 DEBUG(dbgs() << " pressure SU(" << right->NodeNum << ") > SU("
2522 << left->NodeNum << ")\n");
2525 if (!LHigh && !RHigh) {
2526 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2530 return BURRSort(left, right, SPQ);
2533 // Schedule as many instructions in each cycle as possible. So don't make an
2534 // instruction available unless it is ready in the current cycle.
2535 bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2536 if (SU->getHeight() > CurCycle) return false;
2538 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2539 != ScheduleHazardRecognizer::NoHazard)
2545 static bool canEnableCoalescing(SUnit *SU) {
2546 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2547 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2548 // CopyToReg should be close to its uses to facilitate coalescing and
2552 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2553 Opc == TargetOpcode::SUBREG_TO_REG ||
2554 Opc == TargetOpcode::INSERT_SUBREG)
2555 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2556 // close to their uses to facilitate coalescing.
2559 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2560 // If SU does not have a register def, schedule it close to its uses
2561 // because it does not lengthen any live ranges.
2567 // list-ilp is currently an experimental scheduler that allows various
2568 // heuristics to be enabled prior to the normal register reduction logic.
2569 bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2570 if (int res = checkSpecialNodes(left, right))
2573 if (left->isCall || right->isCall)
2574 // No way to compute latency of calls.
2575 return BURRSort(left, right, SPQ);
2577 unsigned LLiveUses = 0, RLiveUses = 0;
2578 int LPDiff = 0, RPDiff = 0;
2579 if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2580 LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2581 RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2583 if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2584 DEBUG(++FactorCount[FactPressureDiff]);
2585 DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2586 << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2587 return LPDiff > RPDiff;
2590 if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2591 bool LReduce = canEnableCoalescing(left);
2592 bool RReduce = canEnableCoalescing(right);
2593 DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
2594 if (LReduce && !RReduce) return false;
2595 if (RReduce && !LReduce) return true;
2598 if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2599 DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2600 << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2601 DEBUG(++FactorCount[FactRegUses]);
2602 return LLiveUses < RLiveUses;
2605 if (!DisableSchedStalls) {
2606 bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2607 bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2608 if (LStall != RStall) {
2609 DEBUG(++FactorCount[FactHeight]);
2610 return left->getHeight() > right->getHeight();
2614 if (!DisableSchedCriticalPath) {
2615 int spread = (int)left->getDepth() - (int)right->getDepth();
2616 if (std::abs(spread) > MaxReorderWindow) {
2617 DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2618 << left->getDepth() << " != SU(" << right->NodeNum << "): "
2619 << right->getDepth() << "\n");
2620 DEBUG(++FactorCount[FactDepth]);
2621 return left->getDepth() < right->getDepth();
2625 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2626 int spread = (int)left->getHeight() - (int)right->getHeight();
2627 if (std::abs(spread) > MaxReorderWindow) {
2628 DEBUG(++FactorCount[FactHeight]);
2629 return left->getHeight() > right->getHeight();
2633 return BURRSort(left, right, SPQ);
2636 void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2638 // Add pseudo dependency edges for two-address nodes.
2639 if (!Disable2AddrHack)
2640 AddPseudoTwoAddrDeps();
2641 // Reroute edges to nodes with multiple uses.
2642 if (!TracksRegPressure)
2643 PrescheduleNodesWithMultipleUses();
2644 // Calculate node priorities.
2645 CalculateSethiUllmanNumbers();
2647 // For single block loops, mark nodes that look like canonical IV increments.
2648 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
2649 for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
2650 initVRegCycle(&sunits[i]);
2655 //===----------------------------------------------------------------------===//
2656 // Preschedule for Register Pressure
2657 //===----------------------------------------------------------------------===//
2659 bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2660 if (SU->isTwoAddress) {
2661 unsigned Opc = SU->getNode()->getMachineOpcode();
2662 const MCInstrDesc &MCID = TII->get(Opc);
2663 unsigned NumRes = MCID.getNumDefs();
2664 unsigned NumOps = MCID.getNumOperands() - NumRes;
2665 for (unsigned i = 0; i != NumOps; ++i) {
2666 if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
2667 SDNode *DU = SU->getNode()->getOperand(i).getNode();
2668 if (DU->getNodeId() != -1 &&
2669 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2677 /// canClobberReachingPhysRegUse - True if SU would clobber one of it's
2678 /// successor's explicit physregs whose definition can reach DepSU.
2679 /// i.e. DepSU should not be scheduled above SU.
2680 static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
2681 ScheduleDAGRRList *scheduleDAG,
2682 const TargetInstrInfo *TII,
2683 const TargetRegisterInfo *TRI) {
2684 const unsigned *ImpDefs
2685 = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
2689 for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
2691 SUnit *SuccSU = SI->getSUnit();
2692 for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
2693 PE = SuccSU->Preds.end(); PI != PE; ++PI) {
2694 if (!PI->isAssignedRegDep())
2697 for (const unsigned *ImpDef = ImpDefs; *ImpDef; ++ImpDef) {
2698 // Return true if SU clobbers this physical register use and the
2699 // definition of the register reaches from DepSU. IsReachable queries a
2700 // topological forward sort of the DAG (following the successors).
2701 if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
2702 scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2710 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2711 /// physical register defs.
2712 static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2713 const TargetInstrInfo *TII,
2714 const TargetRegisterInfo *TRI) {
2715 SDNode *N = SuccSU->getNode();
2716 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2717 const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2718 assert(ImpDefs && "Caller should check hasPhysRegDefs");
2719 for (const SDNode *SUNode = SU->getNode(); SUNode;
2720 SUNode = SUNode->getGluedNode()) {
2721 if (!SUNode->isMachineOpcode())
2723 const unsigned *SUImpDefs =
2724 TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2727 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2728 EVT VT = N->getValueType(i);
2729 if (VT == MVT::Glue || VT == MVT::Other)
2731 if (!N->hasAnyUseOfValue(i))
2733 unsigned Reg = ImpDefs[i - NumDefs];
2734 for (;*SUImpDefs; ++SUImpDefs) {
2735 unsigned SUReg = *SUImpDefs;
2736 if (TRI->regsOverlap(Reg, SUReg))
2744 /// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2745 /// are not handled well by the general register pressure reduction
2746 /// heuristics. When presented with code like this:
2755 /// the heuristics tend to push the store up, but since the
2756 /// operand of the store has another use (U), this would increase
2757 /// the length of that other use (the U->N edge).
2759 /// This function transforms code like the above to route U's
2760 /// dependence through the store when possible, like this:
2771 /// This results in the store being scheduled immediately
2772 /// after N, which shortens the U->N live range, reducing
2773 /// register pressure.
2775 void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2776 // Visit all the nodes in topological order, working top-down.
2777 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2778 SUnit *SU = &(*SUnits)[i];
2779 // For now, only look at nodes with no data successors, such as stores.
2780 // These are especially important, due to the heuristics in
2781 // getNodePriority for nodes with no data successors.
2782 if (SU->NumSuccs != 0)
2784 // For now, only look at nodes with exactly one data predecessor.
2785 if (SU->NumPreds != 1)
2787 // Avoid prescheduling copies to virtual registers, which don't behave
2788 // like other nodes from the perspective of scheduling heuristics.
2789 if (SDNode *N = SU->getNode())
2790 if (N->getOpcode() == ISD::CopyToReg &&
2791 TargetRegisterInfo::isVirtualRegister
2792 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2795 // Locate the single data predecessor.
2797 for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2798 EE = SU->Preds.end(); II != EE; ++II)
2799 if (!II->isCtrl()) {
2800 PredSU = II->getSUnit();
2805 // Don't rewrite edges that carry physregs, because that requires additional
2806 // support infrastructure.
2807 if (PredSU->hasPhysRegDefs)
2809 // Short-circuit the case where SU is PredSU's only data successor.
2810 if (PredSU->NumSuccs == 1)
2812 // Avoid prescheduling to copies from virtual registers, which don't behave
2813 // like other nodes from the perspective of scheduling heuristics.
2814 if (SDNode *N = SU->getNode())
2815 if (N->getOpcode() == ISD::CopyFromReg &&
2816 TargetRegisterInfo::isVirtualRegister
2817 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2820 // Perform checks on the successors of PredSU.
2821 for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2822 EE = PredSU->Succs.end(); II != EE; ++II) {
2823 SUnit *PredSuccSU = II->getSUnit();
2824 if (PredSuccSU == SU) continue;
2825 // If PredSU has another successor with no data successors, for
2826 // now don't attempt to choose either over the other.
2827 if (PredSuccSU->NumSuccs == 0)
2828 goto outer_loop_continue;
2829 // Don't break physical register dependencies.
2830 if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2831 if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2832 goto outer_loop_continue;
2833 // Don't introduce graph cycles.
2834 if (scheduleDAG->IsReachable(SU, PredSuccSU))
2835 goto outer_loop_continue;
2838 // Ok, the transformation is safe and the heuristics suggest it is
2839 // profitable. Update the graph.
2840 DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
2841 << " next to PredSU #" << PredSU->NodeNum
2842 << " to guide scheduling in the presence of multiple uses\n");
2843 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2844 SDep Edge = PredSU->Succs[i];
2845 assert(!Edge.isAssignedRegDep());
2846 SUnit *SuccSU = Edge.getSUnit();
2848 Edge.setSUnit(PredSU);
2849 scheduleDAG->RemovePred(SuccSU, Edge);
2850 scheduleDAG->AddPred(SU, Edge);
2852 scheduleDAG->AddPred(SuccSU, Edge);
2856 outer_loop_continue:;
2860 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2861 /// it as a def&use operand. Add a pseudo control edge from it to the other
2862 /// node (if it won't create a cycle) so the two-address one will be scheduled
2863 /// first (lower in the schedule). If both nodes are two-address, favor the
2864 /// one that has a CopyToReg use (more likely to be a loop induction update).
2865 /// If both are two-address, but one is commutable while the other is not
2866 /// commutable, favor the one that's not commutable.
2867 void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2868 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2869 SUnit *SU = &(*SUnits)[i];
2870 if (!SU->isTwoAddress)
2873 SDNode *Node = SU->getNode();
2874 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
2877 bool isLiveOut = hasOnlyLiveOutUses(SU);
2878 unsigned Opc = Node->getMachineOpcode();
2879 const MCInstrDesc &MCID = TII->get(Opc);
2880 unsigned NumRes = MCID.getNumDefs();
2881 unsigned NumOps = MCID.getNumOperands() - NumRes;
2882 for (unsigned j = 0; j != NumOps; ++j) {
2883 if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
2885 SDNode *DU = SU->getNode()->getOperand(j).getNode();
2886 if (DU->getNodeId() == -1)
2888 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2889 if (!DUSU) continue;
2890 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
2891 E = DUSU->Succs.end(); I != E; ++I) {
2892 if (I->isCtrl()) continue;
2893 SUnit *SuccSU = I->getSUnit();
2896 // Be conservative. Ignore if nodes aren't at roughly the same
2897 // depth and height.
2898 if (SuccSU->getHeight() < SU->getHeight() &&
2899 (SU->getHeight() - SuccSU->getHeight()) > 1)
2901 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2902 // constrains whatever is using the copy, instead of the copy
2903 // itself. In the case that the copy is coalesced, this
2904 // preserves the intent of the pseudo two-address heurietics.
2905 while (SuccSU->Succs.size() == 1 &&
2906 SuccSU->getNode()->isMachineOpcode() &&
2907 SuccSU->getNode()->getMachineOpcode() ==
2908 TargetOpcode::COPY_TO_REGCLASS)
2909 SuccSU = SuccSU->Succs.front().getSUnit();
2910 // Don't constrain non-instruction nodes.
2911 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2913 // Don't constrain nodes with physical register defs if the
2914 // predecessor can clobber them.
2915 if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
2916 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
2919 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2920 // these may be coalesced away. We want them close to their uses.
2921 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
2922 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2923 SuccOpc == TargetOpcode::INSERT_SUBREG ||
2924 SuccOpc == TargetOpcode::SUBREG_TO_REG)
2926 if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
2927 (!canClobber(SuccSU, DUSU) ||
2928 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
2929 (!SU->isCommutable && SuccSU->isCommutable)) &&
2930 !scheduleDAG->IsReachable(SuccSU, SU)) {
2931 DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
2932 << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
2933 scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
2934 /*Reg=*/0, /*isNormalMemory=*/false,
2935 /*isMustAlias=*/false,
2936 /*isArtificial=*/true));
2943 //===----------------------------------------------------------------------===//
2944 // Public Constructor Functions
2945 //===----------------------------------------------------------------------===//
2947 llvm::ScheduleDAGSDNodes *
2948 llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2949 CodeGenOpt::Level OptLevel) {
2950 const TargetMachine &TM = IS->TM;
2951 const TargetInstrInfo *TII = TM.getInstrInfo();
2952 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2954 BURegReductionPriorityQueue *PQ =
2955 new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2956 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2957 PQ->setScheduleDAG(SD);
2961 llvm::ScheduleDAGSDNodes *
2962 llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
2963 CodeGenOpt::Level OptLevel) {
2964 const TargetMachine &TM = IS->TM;
2965 const TargetInstrInfo *TII = TM.getInstrInfo();
2966 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2968 SrcRegReductionPriorityQueue *PQ =
2969 new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
2970 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2971 PQ->setScheduleDAG(SD);
2975 llvm::ScheduleDAGSDNodes *
2976 llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
2977 CodeGenOpt::Level OptLevel) {
2978 const TargetMachine &TM = IS->TM;
2979 const TargetInstrInfo *TII = TM.getInstrInfo();
2980 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2981 const TargetLowering *TLI = &IS->getTargetLowering();
2983 HybridBURRPriorityQueue *PQ =
2984 new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
2986 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
2987 PQ->setScheduleDAG(SD);
2991 llvm::ScheduleDAGSDNodes *
2992 llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
2993 CodeGenOpt::Level OptLevel) {
2994 const TargetMachine &TM = IS->TM;
2995 const TargetInstrInfo *TII = TM.getInstrInfo();
2996 const TargetRegisterInfo *TRI = TM.getRegisterInfo();
2997 const TargetLowering *TLI = &IS->getTargetLowering();
2999 ILPBURRPriorityQueue *PQ =
3000 new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
3001 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3002 PQ->setScheduleDAG(SD);