//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up register pressure reduction list schedulers,
// using standard algorithms. The basic approach uses a priority queue of
// available nodes to schedule. One at a time, nodes are taken from the
// priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
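
// A minimal sketch of the core bottom-up loop implemented below (illustrative
// only; the real ListScheduleBottomUp additionally handles pipeline hazards,
// backtracking, and live physical register interference):
//
//   while (!AvailableQueue->empty()) {
//     SUnit *SU = PickNodeToScheduleBottomUp(); // highest-priority legal node
//     AdvancePastStalls(SU);                    // resolve resource hazards
//     ScheduleNodeBottomUp(SU);                 // emit and release preds
//   }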

#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/DataLayout.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");

static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);

static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
  "disable-2addr-hack", cl::Hidden, cl::init(true),
  cl::desc("Disable scheduler's two-address hack"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));
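
// Example invocation (illustrative; the input file name is hypothetical):
//
//   llc -pre-RA-sched=list-ilp -max-sched-reorder=4 foo.ll
//
// selects the register-pressure-aware ILP scheduler registered above and
// narrows its reorder window from the default of 6 to 4.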

//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports bottom-up scheduling.
///
namespace {
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count of instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other nodes that modify
  /// the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  // Hack to keep track of the inverse of FindCallSeqStart without more
  // bookkeeping data structures.
  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {
    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU
  /// will create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - Adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - Removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
           AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();

  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool forceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace

/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::Untyped) {
    const SDNode *Node = RegDefPos.GetNode();
    unsigned Opcode = Node->getMachineOpcode();

    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}

/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs() << "********** List Scheduling BB#" << BB->getNumber()
               << " '" << BB->getName() << "' **********\n");

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;

  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);
  CallSeqEndForStart.clear();

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  AvailableQueue->releaseState();

  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}

//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//

/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!forceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue;
    // avoid adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}

/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}

/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
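///
/// Illustrative example (not taken from the original comment): in a chain
///
///   CALLSEQ_BEGIN(a) ... CALLSEQ_BEGIN(b) ... CALLSEQ_END(b) ... CALLSEQ_END(a)
///
/// a climb starting at CALLSEQ_END(a) must pair it with CALLSEQ_BEGIN(a),
/// not CALLSEQ_BEGIN(b); tracking NestLevel during the walk is what makes
/// the match unambiguous.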
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}

/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        CallSeqEndForStart[Def] = SU;

        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}

/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}

/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}

/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}

/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}

static void resetVRegCycle(SUnit *SU);

/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->scheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}

/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}

/// UnscheduleNodeBottomUp - Remove the node from the schedule, and update
/// its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = CallSeqEndForStart[SU];
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->unscheduledNode(SU);
}

/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}

/// BacktrackBottomUp - Backtrack scheduling to a previous cycle, unscheduling
/// nodes back to and including BtSU, in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}

static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}

/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    // Unfolding an x86 DEC64m operation results in store, dec, load, which
    // can't be handled here, so quit.
    if (NewNodes.size() == 3)
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location and producing the same type of value
    // but it has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    SDep D(LoadSU, SDep::Data, 0);
    D.setLatency(LoadSU->Latency);
    AddPred(NewSU, D);

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}

/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                                 const TargetRegisterClass *DestRC,
                                                 const TargetRegisterClass *SrcRC,
                                                 SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}

/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}

/// CheckForLiveRegDef - If the specified register def of the specified SUnit
/// clobbers any "live" registers, add them to LRegs.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {

    // Check if this alias of Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}

/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                     std::vector<SUnit*> &LiveRegDefs,
                                     SmallSet<unsigned, 4> &RegAdded,
                                     SmallVector<unsigned, 4> &LRegs) {
  // Look at all live registers. Skip Reg0 and the special CallResource.
  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;
    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
    if (RegAdded.insert(i))
      LRegs.push_back(i);
  }
}

/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (const RegisterMaskSDNode *Op =
          dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
      return Op->getRegMask();
  return NULL;
}

/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specified node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i;  // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;

    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the boundary of a call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }

    if (const uint32_t *RegMask = getNodeRegMask(Node))
      CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}

/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
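///
/// When every available node is blocked by live register interference, the
/// code below escalates in order: backtracking (BacktrackBottomUp), node
/// duplication (CopyAndMoveSuccessors), and finally cross-register-class
/// copies (InsertCopiesAndMoveSuccs); if none of these applies, scheduling
/// aborts with report_fatal_error.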
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Artificial));

      // If one or more successors have been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If cross copy register class is the same as RC, then it must be possible
    // to copy the value directly. Do not try to duplicate the def.
    // If cross copy register class is not the same as RC, then it's possible
    // to copy the value but it requires cross register class copies and it is
    // expensive.
    // If cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
                   << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
                 << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Artificial));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}

/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // account.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}

//===----------------------------------------------------------------------===//
//                RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // Reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG

/// bu_ls_rr_sort - Priority function for bottom up register pressure
/// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;
  bool SrcOrder;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     bool srcorder,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }

  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void scheduledNode(SUnit *SU);

  void unscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};

template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}
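
// Design note: popFromQueueImpl keeps the queue unordered and selects the
// best candidate with a linear scan, swapping it to the back so removal is
// an O(1) pop_back() rather than an O(n) erase from the middle of the vector.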

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  (void)DAG;
  return popFromQueueImpl(Q, Picker);
}

template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            bool srcorder,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
                         tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      dbgs() << "Height " << SU->getHeight() << ": ";
      SU->dump(DAG);
    }
  }
#endif
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace

//===----------------------------------------------------------------------===//
//           Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
  bool LSchedLow = left->isScheduleLow;
  bool RSchedLow = right->isScheduleLow;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
  return 0;
}

/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
/// Smaller number is the higher priority.
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
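
// A worked example of the recurrence above (illustrative, not from the
// original source): for  t3 = add t1, t2  where t1 and t2 are loads with no
// predecessors, each load gets Sethi-Ullman number 1. Visiting the add, the
// first predecessor sets its number to 1; the second predecessor ties, so
// Extra becomes 1 and the add's number is 1 + 1 = 2, reflecting that two
// values must be live at once to evaluate it.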
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}
void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}
// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large SethiUllman number so it will be
    // scheduled right before its predecessors, so that it doesn't lengthen
    // their live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return 0;
#if 1
  return SethiUllmanNumbers[SU->NodeNum];
#else
  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
  if (SU->isCallOp) {
    // FIXME: This assumes all of the defs are used as call operands.
    int NP = (int)Priority - SU->getNode()->getNumValues();
    return (NP > 0) ? NP : 0;
  }
  return Priority;
#endif
}
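
// For example (illustrative): a store typically has NumSuccs == 0 and
// NumPreds != 0, so it gets 0xffff and is kept next to the nodes computing
// its operands, while a CopyToReg gets 0 and stays near its uses; everything
// else falls back to its Sethi-Ullman number.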
//===----------------------------------------------------------------------===//
//                     Register Pressure Tracking
//===----------------------------------------------------------------------===//

void RegReductionPQBase::dumpRegPressure() const {
#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
#endif
}
bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
  if (!TLI)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0)
      continue;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
        return true;
    }
  }
  return false;
}
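
// Illustrative numbers: if RegPressure[RCId] is 30, RegLimit[RCId] is 32, and
// SU would keep a predecessor def of Cost 2 live, then 30 + 2 >= 32 holds and
// HighRegPressure returns true, steering the hybrid comparator below toward
// pressure reduction.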
bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
  const SDNode *N = SU->getNode();

  if (!N->isMachineOpcode() || !SU->NumSuccs)
    return false;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      return true;
  }
  return false;
}
// Compute the register pressure contribution by this instruction by counting
// up for uses that are not live and down for defs. Only count register classes
// that are already under high pressure. As a side effect, compute the number
// of uses of registers that are already live.
//
// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
// so could probably be factored.
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
  LiveUses = 0;
  int PDiff = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      if (PredSU->getNode()->isMachineOpcode())
        ++LiveUses;
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] >= RegLimit[RCId])
        ++PDiff;
    }
  }
  const SDNode *N = SU->getNode();

  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
    return PDiff;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      --PDiff;
  }
  return PDiff;
}
void RegReductionPQBase::scheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  if (!SU->getNode())
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    // FIXME: The ScheduleDAG currently loses information about which of a
    // node's values is consumed by each dependence. Consequently, if the node
    // defines multiple register classes, we don't know which to pressurize
    // here. Instead the following loop consumes the register defs in an
    // arbitrary order. At least it handles the common case of clustered loads
    // to the same class. For precise liveness, each SDep needs to indicate the
    // result number. But that tightly couples the ScheduleDAG with the
    // SelectionDAG making updates tricky. A simpler hack would be to attach a
    // value type or register class to SDep.
    //
    // The most important aspect of register tracking is balancing the increase
    // here with the reduction further below. Note that this SU may use multiple
    // defs in PredSU. They can't be determined here, but we've already
    // compensated by reducing NumRegDefsLeft in PredSU during
    // ScheduleDAGSDNodes::AddSchedEdges.
    --PredSU->NumRegDefsLeft;
    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      if (SkipRegDefs)
        continue;
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
      RegPressure[RCId] += Cost;
      break;
    }
  }

  // We should have this assert, but there may be dead SDNodes that never
  // materialize as SUnits, so they don't appear to generate liveness.
  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
  int SkipRegDefs = (int)SU->NumRegDefsLeft;
  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
      continue;
    unsigned RCId, Cost;
    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
    if (RegPressure[RCId] < Cost) {
      // Register pressure tracking is imprecise. This can happen. But we try
      // hard not to let it happen because it likely results in poor scheduling.
      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
      RegPressure[RCId] = 0;
    }
    else {
      RegPressure[RCId] -= Cost;
    }
  }
  dumpRegPressure();
}
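
// Note the symmetry this relies on (a sketch of the bookkeeping, assuming a
// single-def PredSU): scheduling the first use of PredSU's def decrements
// NumRegDefsLeft to 0 and adds the def's Cost to RegPressure; when PredSU
// itself is scheduled later, the loop above subtracts the same Cost, so the
// pressure returns to its prior level once the live range is closed.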
void RegReductionPQBase::unscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  const SDNode *N = SU->getNode();
  if (!N) return;

  if (!N->isMachineOpcode()) {
    if (N->getOpcode() != ISD::CopyToReg)
      return;
  }
  else {
    unsigned Opc = N->getMachineOpcode();
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG ||
        Opc == TargetOpcode::REG_SEQUENCE ||
        Opc == TargetOpcode::IMPLICIT_DEF)
      return;
  }

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    SUnit *PredSU = I->getSUnit();
    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
    // counts data deps.
    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
      continue;
    const SDNode *PN = PredSU->getNode();
    if (!PN->isMachineOpcode()) {
      if (PN->getOpcode() == ISD::CopyFromReg) {
        EVT VT = PN->getValueType(0);
        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      }
      continue;
    }
    unsigned POpc = PN->getMachineOpcode();
    if (POpc == TargetOpcode::IMPLICIT_DEF)
      continue;
    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
        POpc == TargetOpcode::INSERT_SUBREG ||
        POpc == TargetOpcode::SUBREG_TO_REG) {
      EVT VT = PN->getValueType(0);
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    }
    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
    for (unsigned i = 0; i != NumDefs; ++i) {
      EVT VT = PN->getValueType(i);
      if (!PN->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
        // Register pressure tracking is imprecise. This can happen.
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }

  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
  // may transfer data dependencies to CopyToReg.
  if (SU->NumSuccs && N->isMachineOpcode()) {
    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
    }
  }

  dumpRegPressure();
}
//===----------------------------------------------------------------------===//
//           Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight)
      MaxHeight = Height;
  }
  return MaxHeight;
}
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}
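
// For example (illustrative): an add with two register operands and one
// chain predecessor yields 2, since only the data edges count toward the
// scratch estimate.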
/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();
    if (PredSU->getNode() &&
        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg =
        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}
/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
      unsigned Reg =
        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}
// Set isVRegCycle for a node with only live in opers and live out uses. Also
// set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable in which the operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
static void initVRegCycle(SUnit *SU) {
  if (DisableSchedVRegCycle)
    return;

  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
    return;

  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");

  SU->isVRegCycle = true;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;
  }
}
// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
// CopyFromReg operands. We should no longer penalize other uses of this VReg.
static void resetVRegCycle(SUnit *SU) {
  if (!SU->isVRegCycle)
    return;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    if (PredSU->isVRegCycle) {
      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
             "VRegCycle def must be CopyFromReg");
      I->getSUnit()->isVRegCycle = 0;
    }
  }
}
// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
// means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
  // If this SU also defines the VReg, don't hoist it as a "use".
  if (SU->isVRegCycle)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->isVRegCycle &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
      return true;
    }
  }
  return false;
}

// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return true;
  return false;
}
// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
                            RegReductionPQBase *SPQ) {
  // Scheduling an instruction that uses a VReg whose postincrement has not yet
  // been scheduled will induce a copy. Model this as an extra cycle of latency.
  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
  int LHeight = (int)left->getHeight() + LPenalty;
  int RHeight = (int)right->getHeight() + RPenalty;

  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
    BUHasStall(left, LHeight, SPQ);
  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
    BUHasStall(right, RHeight, SPQ);

  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If both will cause a stall, sort them by height.
  if (LStall) {
    if (!RStall)
      return 1;
    if (LHeight != RHeight)
      return LHeight > RHeight ? 1 : -1;
  } else if (RStall)
    return -1;

  // If either node is scheduling for latency, sort them by height/depth
  // and latency.
  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
                     right->SchedulingPref == Sched::ILP)) {
    // If neither instruction stalls (!LStall && !RStall) and the
    // HazardRecognizer is enabled, grouping instructions by cycle, then height
    // is already covered, so only depth matters. We also reach this point if
    // both stall but have the same height.
    if (!SPQ->getHazardRec()->isEnabled()) {
      if (LHeight != RHeight)
        return LHeight > RHeight ? 1 : -1;
    }
    int LDepth = left->getDepth() - LPenalty;
    int RDepth = right->getDepth() - RPenalty;
    if (LDepth != RDepth) {
      DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
            << ") depth " << LDepth << " vs SU (" << right->NodeNum
            << ") depth " << RDepth << "\n");
      return LDepth < RDepth ? 1 : -1;
    }
    if (left->Latency != right->Latency)
      return left->Latency > right->Latency ? 1 : -1;
  }
  return 0;
}
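
// For example (illustrative): if only left stalls, the function returns 1 and
// right gets the higher priority; if both stall with LHeight 9 and RHeight 7,
// it also returns 1, again giving right the higher priority.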
static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
  // Schedule physical register definitions close to their use. This is
  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
  // long as shortening physreg live ranges is generally good, we can defer
  // creating a subtarget hook.
  if (!DisableSchedPhysRegJoin) {
    bool LHasPhysReg = left->hasPhysRegDefs;
    bool RHasPhysReg = right->hasPhysRegDefs;
    if (LHasPhysReg != RHasPhysReg) {
      #ifndef NDEBUG
      const char *const PhysRegMsg[] = {" has no physreg"," defines a physreg"};
      #endif
      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
            << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
            << PhysRegMsg[RHasPhysReg] << "\n");
      return LHasPhysReg < RHasPhysReg;
    }
  }

  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);

  // Be really careful about hoisting call operands above previous calls.
  // Only allow it if it would reduce register pressure.
  if (left->isCall && right->isCallOp) {
    unsigned RNumVals = right->getNode()->getNumValues();
    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
  }
  if (right->isCall && left->isCallOp) {
    unsigned LNumVals = left->getNode()->getNumValues();
    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
  }

  if (LPriority != RPriority)
    return LPriority > RPriority;

  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
  // the same, then keep source order.
  if (left->isCall || right->isCall) {
    unsigned LOrder = SPQ->getNodeOrdering(left);
    unsigned ROrder = SPQ->getNodeOrdering(right);

    // Prefer an ordering where the lower the non-zero order number, the higher
    // the priority.
    if ((LOrder || ROrder) && LOrder != ROrder)
      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
  }

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t1 = op t2, c1
  // t3 = op t4, c2
  // t2 = op c3
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist)
    return LDist < RDist;

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch)
    return LScratch > RScratch;

  // Comparing latency against a call makes little sense unless the node
  // is register pressure-neutral.
  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
    return (left->NodeQueueId > right->NodeQueueId);

  // Do not compare latencies when one or both of the nodes are calls.
  if (!DisableSchedCycles &&
      !(left->isCall || right->isCall)) {
    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  else {
    if (left->getHeight() != right->getHeight())
      return left->getHeight() > right->getHeight();

    if (left->getDepth() != right->getDepth())
      return left->getDepth() < right->getDepth();
  }

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  return (left->NodeQueueId > right->NodeQueueId);
}
// Bottom up
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  return BURRSort(left, right, SPQ);
}

// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  // Prefer an ordering where the lower the non-zero order number, the higher
  // the priority.
  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return BURRSort(left, right, SPQ);
}
// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}
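
// Illustrative arithmetic: with ReadyDelay == 3, an SU of height 7 stays out
// of the available queue until CurCycle reaches 4 (hazards permitting), since
// 7 > CurCycle + 3 holds for every earlier cycle.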
// Return true if right should be scheduled with higher priority than left.
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);
  // Avoid causing spills. If register pressure is high, schedule for
  // register pressure reduction.
  if (LHigh && !RHigh) {
    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
          << right->NodeNum << ")\n");
    return true;
  }
  else if (!LHigh && RHigh) {
    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
          << left->NodeNum << ")\n");
    return false;
  }
  if (!LHigh && !RHigh) {
    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  return BURRSort(left, right, SPQ);
}
// Schedule as many instructions in each cycle as possible, so don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}
static bool canEnableCoalescing(SUnit *SU) {
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return true;

  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return true;

  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return true;

  return false;
}
// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
  }
  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;
  }

  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
    bool LReduce = canEnableCoalescing(left);
    bool RReduce = canEnableCoalescing(right);
    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;
  }

  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    return LLiveUses < RLiveUses;
  }

  if (!DisableSchedStalls) {
    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
    if (LStall != RStall)
      return left->getHeight() > right->getHeight();
  }

  if (!DisableSchedCriticalPath) {
    int spread = (int)left->getDepth() - (int)right->getDepth();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
            << left->getDepth() << " != SU(" << right->NodeNum << "): "
            << right->getDepth() << "\n");
      return left->getDepth() < right->getDepth();
    }
  }

  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
    int spread = (int)left->getHeight() - (int)right->getHeight();
    if (std::abs(spread) > MaxReorderWindow)
      return left->getHeight() > right->getHeight();
  }

  return BURRSort(left, right, SPQ);
}
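
// To summarize the cascade above (a reading aid, not new policy): special
// nodes first, then register pressure difference, then coalescing
// opportunity, then live uses, then stalls, then critical path (depth), then
// height, and finally the plain BURRSort tie-breakers.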
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  // Add pseudo dependency edges for two-address nodes.
  if (!Disable2AddrHack)
    AddPseudoTwoAddrDeps();
  // Reroute edges to nodes with multiple uses.
  if (!TracksRegPressure && !SrcOrder)
    PrescheduleNodesWithMultipleUses();
  // Calculate node priorities.
  CalculateSethiUllmanNumbers();

  // For single block loops, mark nodes that look like canonical IV increments.
  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
      initVRegCycle(&sunits[i]);
    }
  }
}
//===----------------------------------------------------------------------===//
//                    Preschedule for Register Pressure
//===----------------------------------------------------------------------===//

bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}
/// canClobberReachingPhysRegUse - True if SU would clobber one of its
/// successor's explicit physregs whose definition can reach DepSU.
/// i.e. DepSU should not be scheduled above SU.
static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
                                         ScheduleDAGRRList *scheduleDAG,
                                         const TargetInstrInfo *TII,
                                         const TargetRegisterInfo *TRI) {
  const uint16_t *ImpDefs
    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  const uint32_t *RegMask = getNodeRegMask(SU->getNode());
  if (!ImpDefs && !RegMask)
    return false;

  for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
       SI != SE; ++SI) {
    SUnit *SuccSU = SI->getSUnit();
    for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
      if (!PI->isAssignedRegDep())
        continue;

      if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
          scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
        return true;

      if (ImpDefs)
        for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
          // Return true if SU clobbers this physical register use and the
          // definition of the register is reachable from DepSU. IsReachable
          // queries a topological forward sort of the DAG (following the
          // successors).
          if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
              scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
            return true;
    }
  }
  return false;
}
/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const uint16_t *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    const uint32_t *SURegMask = getNodeRegMask(SUNode);
    if (!SUImpDefs && !SURegMask)
      continue;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
        return true;
      if (!SUImpDefs)
        continue;
      for (;*SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}
/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///      N
///    / |
///   /  |
///  U  store
///  |
/// ...
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///      N
///      ||
///      ||
///     store
///       |
///       U
///       |
///      ...
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
         EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires additional
    // support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
         EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
                 << " next to PredSU #" << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}
/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
      continue;

    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
        if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
            (!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial));
        }
      }
    }
  }
}
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}
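
// Usage sketch (illustrative; assumes the stock llc driver, which surfaces
// the factories above through the -pre-RA-sched option):
//
//   llc -pre-RA-sched=list-burr   input.ll   (pure register reduction)
//   llc -pre-RA-sched=source      input.ll   (prefer source order)
//   llc -pre-RA-sched=list-hybrid input.ll   (balance latency and pressure)
//   llc -pre-RA-sched=list-ilp    input.ll   (balance ILP and pressure)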