//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up and top-down register pressure reduction list
// schedulers, using standard algorithms. The basic approach uses a priority
// queue of available nodes to schedule. One at a time, nodes are taken from
// the priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
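//
// As an illustrative sketch only (the helper names here are hypothetical and
// do not appear below), the core of such a list scheduler is:
//
//   while (!Available.empty()) {
//     SUnit *SU = Available.pop();   // highest priority first
//     if (!isLegalToSchedule(SU))    // e.g. live physreg interference
//       defer(SU);                   // retry once the conflict clears
//     else
//       emit(SU);                    // then release its predecessors
//   }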
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-RA-sched"
#include "ScheduleDAGSDNodes.h"
#include "llvm/InlineAsm.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <climits>
using namespace llvm;

STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);

static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));
// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));
// For sched=list-ilp, count the number of times each factor comes into play.
enum { FactPressureDiff, FactRegUses, FactStall, FactHeight, FactDepth,
       FactStatic, FactOther, NumFactors };

static const char *FactorName[NumFactors] =
  {"PressureDiff", "RegUses", "Stall", "Height", "Depth", "Static", "Other"};
static int FactorCount[NumFactors];
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports both top-down and bottom-up scheduling.
///
namespace {
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;
  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
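  /// For example (illustrative only): bottom-up, a node released by a load
  /// with latency 4 stays in PendingQueue until AdvanceToCycle reaches the
  /// node's ready height, at which point ReleasePending moves it to the
  /// AvailableQueue.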
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions that are
  /// "live". These nodes must be scheduled before any other node that
  /// modifies the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;
public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits) {
    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec = tm.getInstrInfo()->CreateTargetHazardRecognizer(&tm, this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }

  void Schedule();
  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
           AvailableQueue->isReady(SU);
  }
  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVector<SUnit*, 2>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVector<unsigned, 4>&);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();
  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = NewSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// ForceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool ForceUnitLatencies() const {
    return !NeedLatency;
  }
};
} // end anonymous namespace
/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
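///
/// For example (illustrative, assuming an ARM-like target): a vector-load
/// pseudo expanded through REG_SEQUENCE defines an MVT::untyped value, and
/// its register class is taken from the REG_SEQUENCE's class operand
/// (operand 0) rather than derived from the value type.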
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost) {
  EVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::untyped) {
    const SDNode *Node = RegDefPos.GetNode();
    unsigned Opcode = Node->getMachineOpcode();

    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx =
        cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc &Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  for (int i = 0; i < NumFactors; ++i) {
    FactorCount[i] = 0;
  }

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, NULL);
  LiveRegGens.resize(TRI->getNumRegs() + 1, NULL);

  // Build the scheduling graph.
  BuildSchedGraph(NULL);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  for (int i = 0; i < NumFactors; ++i) {
    DEBUG(dbgs() << FactorName[i] << "\t" << FactorCount[i] << "\n");
  }
  AvailableQueue->releaseState();
}
//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!ForceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue, avoid
    // adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}
/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner))
          return true;
      return false;
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}
/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
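///
/// As an illustrative example of this bookkeeping, walking up the chain from
/// the outer CALLSEQ_END of
///
///   CALLSEQ_BEGIN          ; NestLevel 1 -> 0: this node is returned
///     CALLSEQ_BEGIN        ; NestLevel 2 -> 1
///     CALLSEQ_END          ; NestLevel 1 -> 2, MaxNest becomes 2
///   CALLSEQ_END            ; start here: NestLevel 0 -> 1
///
/// skips the inner pair and stops only at the matching outer CALLSEQ_BEGIN.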
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = 0;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return 0;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return 0;
  }
}
/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
///     flags = (3) add
///     flags = (2) addc flags
///     flags = (1) addc flags
///
/// results in
///
///     LiveRegDefs[flags] = 3
///     LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}
/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}
/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}
/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}
/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // coalesced away.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}
static void resetVRegCycle(SUnit *SU);
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->ScheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}
/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessors' states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = NULL;
      LiveRegGens[I->getReg()] = NULL;
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = NULL;
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = NULL;
        LiveRegGens[CallResource] = NULL;
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == NULL ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->UnscheduledNode(SU);
}
/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}
/// BacktrackBottomUp - Backtrack scheduling to a previous cycle specified by
/// BtSU in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    if (SU->isSucc(OldSU))
      // Don't try to remove SU from AvailableQueue.
      SU->isAvailable = false;
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}
static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return NULL;

  if (SU->getNode()->getGluedNode())
    return NULL;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return NULL;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return NULL;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return NULL;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location and producing the same type of value
    // but it has different alignment or volatility.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      ComputeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    ComputeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    AddPred(NewSU, SDep(LoadSU, SDep::Data, LoadSU->Latency));

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;
  return NewSU;
}
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                              const TargetRegisterClass *DestRC,
                                              const TargetRegisterClass *SrcRC,
                                              SmallVector<SUnit*, 2> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(NULL);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(NULL);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      SDep D(CopyFromSU, SDep::Order, /*Latency=*/0,
             /*Reg=*/0, /*isNormalMemory=*/false,
             /*isMustAlias=*/false, /*isArtificial=*/true);
      AddPred(SuccSU, D);
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AddPred(CopyFromSU, SDep(SU, SDep::Data, SU->Latency, Reg));
  AddPred(CopyToSU, SDep(CopyFromSU, SDep::Data, CopyFromSU->Latency, 0));

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const unsigned *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
/// CheckForLiveRegDef - Update the live register vector and add interfering
/// registers to LRegs if the specified register def of the specified SUnit
/// clobbers any "live" registers.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVector<unsigned, 4> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (const unsigned *AliasI = TRI->getOverlaps(Reg); *AliasI; ++AliasI) {

    // Check if Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVector<unsigned, 4> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;

    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Add one here so that we include the special calling-sequence resource.
      for (unsigned i = 0, e = TRI->getNumRegs() + 1; i != e; ++i)
        if (LiveRegDefs[i]) {
          SDNode *Gen = LiveRegGens[i]->getNode();
          while (SDNode *Glued = Gen->getGluedNode())
            Gen = Glued;
          if (!IsChainDependent(Gen, Node) && RegAdded.insert(i))
            LRegs.push_back(i);
        }
      continue;
    }

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const unsigned *Reg = MCID.ImplicitDefs; *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}
/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SmallVector<SUnit*, 4> Interferences;
  DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMap;

  SUnit *CurSU = AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    LRegsMap.insert(std::make_pair(CurSU, LRegs));

    CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
    Interferences.push_back(CurSU);
    CurSU = AvailableQueue->pop();
  }
  if (CurSU) {
    // Add the nodes that aren't ready back onto the available list.
    for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
      Interferences[i]->isPending = false;
      assert(Interferences[i]->isAvailable && "must still be available");
      AvailableQueue->push(Interferences[i]);
    }
    return CurSU;
  }

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to resolve it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = NULL;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      AddPred(TrySU, SDep(BtSU, SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false, /*isArtificial=*/true));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available. Schedule a successor that's now
      // available instead.
      if (!TrySU->isAvailable) {
        CurSU = AvailableQueue->pop();
      }
      else {
        CurSU = TrySU;
        TrySU->isPending = false;
        Interferences.erase(Interferences.begin()+i);
      }
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVector<unsigned, 4> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If cross copy register class is the same as RC, then it must be possible
    // to copy the value directly. Do not try to duplicate the def.
    // If cross copy register class is not the same as RC, then it's possible to
    // copy the value but it requires cross register class copies and it is
    // expensive.
    // If cross copy register class is null, then it's not possible to copy
    // the value at all.
    SUnit *NewDef = 0;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Order, /*Latency=*/1,
                          /*Reg=*/0, /*isNormalMemory=*/false,
                          /*isMustAlias=*/false,
                          /*isArtificial=*/true));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Order, /*Latency=*/1,
                         /*Reg=*/0, /*isNormalMemory=*/false,
                         /*isMustAlias=*/false,
                         /*isArtificial=*/true));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }

  assert(CurSU && "Unable to resolve live physical register dependencies!");

  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    Interferences[i]->isPending = false;
    // May no longer be available due to backtracking.
    if (Interferences[i]->isAvailable) {
      AvailableQueue->push(Interferences[i]);
    }
  }
  return CurSU;
}
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // consideration.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/true);
#endif
}
//===----------------------------------------------------------------------===//
//  RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
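//
// As a rough illustration (not the exact formula used below), the classic
// Sethi-Ullman number of a binary node n with operand numbers SU(l) and
// SU(r) is:
//
//   SU(n) = max(SU(l), SU(r))   if SU(l) != SU(r)
//   SU(n) = SU(l) + 1           if SU(l) == SU(r)
//
// i.e. an estimate of the registers needed to evaluate the subtree without
// spilling. CalcNodeSethiUllmanNumber below generalizes this to DAG
// predecessors, with an "Extra" increment for ties.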
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

#ifndef NDEBUG
template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}
  reverse_sort(const reverse_sort &RHS) : SortFunc(RHS.SortFunc) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};
#endif // NDEBUG
/// bu_ls_rr_sort - Priority function for bottom up register pressure
/// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}
  bu_ls_rr_sort(const bu_ls_rr_sort &RHS) : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};
// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  src_ls_rr_sort(const src_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool operator()(SUnit* left, SUnit* right) const;
};
// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  hybrid_ls_rr_sort(const hybrid_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};
// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}
  ilp_ls_rr_sort(const ilp_ls_rr_sort &RHS)
    : SPQ(RHS.SPQ) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};
class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  ///
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(NULL) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }
  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits);

  void addNode(const SUnit *SU);

  void updateNode(const SUnit *SU);

  void releaseState() {
    SUnits = 0;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return scheduleDAG->DAG->GetOrdering(SU->getNode());
  }

  bool empty() const { return Queue.empty(); }

  void push(SUnit *U) {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != prior(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void ScheduledNode(SUnit *SU);

  void UnscheduledNode(SUnit *SU);

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = llvm::next(Q.begin()),
         E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != prior(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  (void)DAG;
  return popFromQueueImpl(Q, Picker);
}
template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() {
    if (Queue.empty()) return NULL;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

  void dump(ScheduleDAG *DAG) const {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      dbgs() << "Height " << SU->getHeight() << ": ";
      SU->dump(DAG);
    }
  }
};
typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace
//===----------------------------------------------------------------------===//
//  Static Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//
// Check for special nodes that bypass scheduling heuristics.
// Currently this pushes TokenFactor nodes down, but may be used for other
// pseudo-ops as well.
//
// Return -1 to schedule right above left, 1 for left above right.
// Return 0 if no bias exists.
static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
  bool LSchedLow = left->isScheduleLow;
  bool RSchedLow = right->isScheduleLow;
  if (LSchedLow != RSchedLow)
    return LSchedLow < RSchedLow ? 1 : -1;
  return 0;
}
/// CalcNodeSethiUllmanNumber - Compute Sethi Ullman number.
/// Smaller number is the higher priority.
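/// For example (illustrative): a leaf gets number 1; a node whose two data
/// predecessors are numbered 2 and 3 gets max(2,3) = 3; if both predecessors
/// are numbered 3, the tie adds one Extra, giving 3 + 1 = 4.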
static unsigned
CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
  unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
  if (SethiUllmanNumber != 0)
    return SethiUllmanNumber;

  unsigned Extra = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
    if (PredSethiUllman > SethiUllmanNumber) {
      SethiUllmanNumber = PredSethiUllman;
      Extra = 0;
    } else if (PredSethiUllman == SethiUllmanNumber)
      ++Extra;
  }

  SethiUllmanNumber += Extra;

  if (SethiUllmanNumber == 0)
    SethiUllmanNumber = 1;

  return SethiUllmanNumber;
}
/// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
/// scheduling units.
void RegReductionPQBase::CalculateSethiUllmanNumbers() {
  SethiUllmanNumbers.assign(SUnits->size(), 0);

  for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
    CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
}

void RegReductionPQBase::addNode(const SUnit *SU) {
  unsigned SUSize = SethiUllmanNumbers.size();
  if (SUnits->size() > SUSize)
    SethiUllmanNumbers.resize(SUSize*2, 0);
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}

void RegReductionPQBase::updateNode(const SUnit *SU) {
  SethiUllmanNumbers[SU->NodeNum] = 0;
  CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
}
// Lower priority means schedule further down. For bottom-up scheduling, lower
// priority SUs are scheduled before higher priority SUs.
unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
  assert(SU->NodeNum < SethiUllmanNumbers.size());
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return 0;
  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return 0;
  if (SU->NumSuccs == 0 && SU->NumPreds != 0)
    // If SU does not have a register use, i.e. it doesn't produce a value
    // that would be consumed (e.g. store), then it terminates a chain of
    // computation. Give it a large Sethi-Ullman number so it will be
    // scheduled right before its predecessors, so that it doesn't lengthen
    // their live ranges.
    return 0xffff;
  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return 0;
#if 1
  return SethiUllmanNumbers[SU->NodeNum];
#else
  unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
  if (SU->isCallOp) {
    // FIXME: This assumes all of the defs are used as call operands.
    int NP = (int)Priority - SU->getNode()->getNumValues();
    return (NP > 0) ? NP : 0;
  }
  return Priority;
#endif
}

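// Roughly, the bands above: copies and subreg ops get priority 0 so they stay
// close to their uses and can coalesce away; nodes with no register use also
// get 0 since they lengthen no live range; chain terminators such as stores
// get 0xffff so they schedule right before the computation feeding them;
// everything else is ranked by its Sethi-Ullman number.
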
//===----------------------------------------------------------------------===//
//                     Register Pressure Tracking
//===----------------------------------------------------------------------===//

void RegReductionPQBase::dumpRegPressure() const {
  for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
         E = TRI->regclass_end(); I != E; ++I) {
    const TargetRegisterClass *RC = *I;
    unsigned Id = RC->getID();
    unsigned RP = RegPressure[Id];
    if (!RP) continue;
    DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
          << '\n');
  }
}

bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
  if (!TLI)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);

      if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
        return true;
    }
  }
  return false;
}

bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
  const SDNode *N = SU->getNode();

  if (!N->isMachineOpcode() || !SU->NumSuccs)
    return false;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      return true;
  }
  return false;
}

// Compute the register pressure contribution of this instruction by counting
// up for uses that are not live and down for defs. Only count register classes
// that are already under high pressure. As a side effect, compute the number
// of uses of registers that are already live.
//
// FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
// so could probably be factored.
int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
  LiveUses = 0;
  int PDiff = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      if (PredSU->getNode()->isMachineOpcode())
        ++LiveUses;
      continue;
    }
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance()) {
      EVT VT = RegDefPos.GetValue();
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] >= RegLimit[RCId])
        ++PDiff;
    }
  }
  const SDNode *N = SU->getNode();

  if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
    return PDiff;

  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  for (unsigned i = 0; i != NumDefs; ++i) {
    EVT VT = N->getValueType(i);
    if (!N->hasAnyUseOfValue(i))
      continue;
    unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
    if (RegPressure[RCId] >= RegLimit[RCId])
      --PDiff;
  }
  return PDiff;
}

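// Illustrative arithmetic (hypothetical state): if the relevant register
// class is already at its limit, an SU with two operands that are not yet
// live plus one used def in that class reports PDiff = +2 - 1 = +1, i.e.
// scheduling it now is expected to raise pressure; LiveUses meanwhile counts
// the operands whose defining nodes are already fully live.
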
void RegReductionPQBase::ScheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  if (!SU->getNode())
    return;

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumRegDefsLeft is zero when enough uses of this node have been scheduled
    // to cover the number of registers defined (they are all live).
    if (PredSU->NumRegDefsLeft == 0) {
      continue;
    }
    // FIXME: The ScheduleDAG currently loses information about which of a
    // node's values is consumed by each dependence. Consequently, if the node
    // defines multiple register classes, we don't know which to pressurize
    // here. Instead the following loop consumes the register defs in an
    // arbitrary order. At least it handles the common case of clustered loads
    // to the same class. For precise liveness, each SDep needs to indicate the
    // result number. But that tightly couples the ScheduleDAG with the
    // SelectionDAG making updates tricky. A simpler hack would be to attach a
    // value type or register class to SDep.
    //
    // The most important aspect of register tracking is balancing the increase
    // here with the reduction further below. Note that this SU may use multiple
    // defs in PredSU. They can't be determined here, but we've already
    // compensated by reducing NumRegDefsLeft in PredSU during
    // ScheduleDAGSDNodes::AddSchedEdges.
    --PredSU->NumRegDefsLeft;
    unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
    for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
         RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
      if (SkipRegDefs)
        continue;
      unsigned RCId, Cost;
      GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
      RegPressure[RCId] += Cost;
      break;
    }
  }

  // We should have this assert, but there may be dead SDNodes that never
  // materialize as SUnits, so they don't appear to generate liveness.
  //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
  int SkipRegDefs = (int)SU->NumRegDefsLeft;
  for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
       RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
    if (SkipRegDefs > 0)
      continue;
    unsigned RCId, Cost;
    GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost);
    if (RegPressure[RCId] < Cost) {
      // Register pressure tracking is imprecise. This can happen. But we try
      // hard not to let it happen because it likely results in poor scheduling.
      DEBUG(dbgs() << "  SU(" << SU->NodeNum << ") has too many regdefs\n");
      RegPressure[RCId] = 0;
    }
    else {
      RegPressure[RCId] -= Cost;
    }
  }
  dumpRegPressure();
}

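// For illustration (hypothetical): when the first consumer of a PredSU that
// defines two i32 values is scheduled, NumRegDefsLeft drops 2 -> 1 and exactly
// one def's cost is added above. The matching decrement happens in the second
// loop, once PredSU itself is scheduled and the defs whose uses were already
// scheduled are retired.
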
void RegReductionPQBase::UnscheduledNode(SUnit *SU) {
  if (!TracksRegPressure)
    return;

  const SDNode *N = SU->getNode();
  if (!N) return;

  if (!N->isMachineOpcode()) {
    if (N->getOpcode() != ISD::CopyToReg)
      return;
  }
  else {
    unsigned Opc = N->getMachineOpcode();
    if (Opc == TargetOpcode::EXTRACT_SUBREG ||
        Opc == TargetOpcode::INSERT_SUBREG ||
        Opc == TargetOpcode::SUBREG_TO_REG ||
        Opc == TargetOpcode::REG_SEQUENCE ||
        Opc == TargetOpcode::IMPLICIT_DEF)
      return;
  }

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl())
      continue;
    SUnit *PredSU = I->getSUnit();
    // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
    // counts data deps.
    if (PredSU->NumSuccsLeft != PredSU->Succs.size())
      continue;
    const SDNode *PN = PredSU->getNode();
    if (!PN->isMachineOpcode()) {
      if (PN->getOpcode() == ISD::CopyFromReg) {
        EVT VT = PN->getValueType(0);
        unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
        RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      }
      continue;
    }
    unsigned POpc = PN->getMachineOpcode();
    if (POpc == TargetOpcode::IMPLICIT_DEF)
      continue;
    if (POpc == TargetOpcode::EXTRACT_SUBREG ||
        POpc == TargetOpcode::INSERT_SUBREG ||
        POpc == TargetOpcode::SUBREG_TO_REG) {
      EVT VT = PN->getValueType(0);
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
      continue;
    }
    unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
    for (unsigned i = 0; i != NumDefs; ++i) {
      EVT VT = PN->getValueType(i);
      if (!PN->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
        // Register pressure tracking is imprecise. This can happen.
        RegPressure[RCId] = 0;
      else
        RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
    }
  }

  // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
  // may transfer data dependencies to CopyToReg.
  if (SU->NumSuccs && N->isMachineOpcode()) {
    unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
      RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
    }
  }

  dumpRegPressure();
}

//===----------------------------------------------------------------------===//
//           Dynamic Node Priority for Register Pressure Reduction
//===----------------------------------------------------------------------===//

/// closestSucc - Returns the scheduled cycle of the successor which is
/// closest to the current cycle.
static unsigned closestSucc(const SUnit *SU) {
  unsigned MaxHeight = 0;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain succs
    unsigned Height = I->getSUnit()->getHeight();
    // If there are a bunch of CopyToRegs stacked up, they should be considered
    // to be at the same position.
    if (I->getSUnit()->getNode() &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
      Height = closestSucc(I->getSUnit())+1;
    if (Height > MaxHeight) MaxHeight = Height;
  }
  return MaxHeight;
}

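// E.g. (hypothetical heights): with data successors at heights 2 and 5, plus a
// CopyToReg successor whose own closest successor sits at height 7, this
// returns max(2, 5, 7+1) = 8 -- the stacked copy is treated as transparent.
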
/// calcMaxScratches - Returns a cost estimate of the worst-case requirement
/// for scratch registers, i.e. the number of data dependencies.
static unsigned calcMaxScratches(const SUnit *SU) {
  unsigned Scratches = 0;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    Scratches++;
  }
  return Scratches;
}

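// E.g. a hypothetical node with three data operands and one chain operand
// reports 3: each data operand may need its own register to stay live until
// the node issues.
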
/// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
/// CopyFromReg from a virtual register.
static bool hasOnlyLiveInOpers(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *PredSU = I->getSUnit();
    if (PredSU->getNode() &&
        PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg =
        cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

/// hasOnlyLiveOutUses - Return true if SU has only value successors that are
/// CopyToReg to a virtual register. This SU def is probably a liveout and
/// it has no other use. It should be scheduled closer to the terminator.
static bool hasOnlyLiveOutUses(const SUnit *SU) {
  bool RetVal = false;
  for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    const SUnit *SuccSU = I->getSUnit();
    if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
      unsigned Reg =
        cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
      if (TargetRegisterInfo::isVirtualRegister(Reg)) {
        RetVal = true;
        continue;
      }
    }
    return false;
  }
  return RetVal;
}

// Set isVRegCycle for a node with only live in opers and live out uses. Also
// set isVRegCycle for its CopyFromReg operands.
//
// This is only relevant for single-block loops, in which case the VRegCycle
// node is likely an induction variable in which the operand and target virtual
// registers should be coalesced (e.g. pre/post increment values). Setting the
// isVRegCycle flag helps the scheduler prioritize other uses of the same
// CopyFromReg so that this node becomes the virtual register "kill". This
// avoids interference between the values live in and out of the block and
// eliminates a copy inside the loop.
static void initVRegCycle(SUnit *SU) {
  if (DisableSchedVRegCycle)
    return;

  if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
    return;

  DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");

  SU->isVRegCycle = true;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;
    I->getSUnit()->isVRegCycle = true;
  }
}

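// For illustration, in a hypothetical single-block loop:
//
//   %v = CopyFromReg %vreg0     // loop-carried value, live in
//   ... other uses of %v ...
//   %inc = add %v, 1            // only live-in opers, only live-out uses
//   CopyToReg %vreg0, %inc      // live out around the backedge
//
// %inc and its CopyFromReg operand are both flagged isVRegCycle, steering the
// other uses of %v to schedule so that the increment becomes the last use
// (the kill) of %vreg0, letting the in and out values of %vreg0 coalesce.
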
// After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
// CopyFromReg operands. We should no longer penalize other uses of this VReg.
static void resetVRegCycle(SUnit *SU) {
  if (!SU->isVRegCycle)
    return;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    SUnit *PredSU = I->getSUnit();
    if (PredSU->isVRegCycle) {
      assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
             "VRegCycle def must be CopyFromReg");
      I->getSUnit()->isVRegCycle = 0;
    }
  }
}

// Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
// means a node that defines the VRegCycle has not been scheduled yet.
static bool hasVRegCycleUse(const SUnit *SU) {
  // If this SU also defines the VReg, don't hoist it as a "use".
  if (SU->isVRegCycle)
    return false;

  for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
       I != E; ++I) {
    if (I->isCtrl()) continue;  // ignore chain preds
    if (I->getSUnit()->isVRegCycle &&
        I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
      DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
      return true;
    }
  }
  return false;
}

// Check for either a dependence (latency) or resource (hazard) stall.
//
// Note: The ScheduleHazardRecognizer interface requires a non-const SU.
static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
  if ((int)SPQ->getCurCycle() < Height) return true;
  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return true;
  return false;
}

// Return -1 if left has higher priority, 1 if right has higher priority.
// Return 0 if latency-based priority is equivalent.
static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
                            RegReductionPQBase *SPQ) {
  // Scheduling an instruction that uses a VReg whose postincrement has not yet
  // been scheduled will induce a copy. Model this as an extra cycle of latency.
  int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
  int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
  int LHeight = (int)left->getHeight() + LPenalty;
  int RHeight = (int)right->getHeight() + RPenalty;

  bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
    BUHasStall(left, LHeight, SPQ);
  bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
    BUHasStall(right, RHeight, SPQ);

  // If scheduling one of the nodes will cause a pipeline stall, delay it.
  // If scheduling either one of the nodes will cause a pipeline stall, sort
  // them according to their height.
  if (LStall) {
    if (!RStall) {
      DEBUG(++FactorCount[FactStall]);
      return 1;
    }
    if (LHeight != RHeight) {
      DEBUG(++FactorCount[FactStall]);
      return LHeight > RHeight ? 1 : -1;
    }
  } else if (RStall) {
    DEBUG(++FactorCount[FactStall]);
    return -1;
  }

  // If either node is scheduling for latency, sort them by height/depth
  // and latency.
  if (!checkPref || (left->SchedulingPref == Sched::ILP ||
                     right->SchedulingPref == Sched::ILP)) {
    if (DisableSchedCycles) {
      if (LHeight != RHeight) {
        DEBUG(++FactorCount[FactHeight]);
        return LHeight > RHeight ? 1 : -1;
      }
    }
    else {
      // If neither instruction stalls (!LStall && !RStall) then its height is
      // already covered so only its depth matters. We also reach this if both
      // stall but have the same height.
      int LDepth = left->getDepth() - LPenalty;
      int RDepth = right->getDepth() - RPenalty;
      if (LDepth != RDepth) {
        DEBUG(++FactorCount[FactDepth]);
        DEBUG(dbgs() << "  Comparing latency of SU (" << left->NodeNum
              << ") depth " << LDepth << " vs SU (" << right->NodeNum
              << ") depth " << RDepth << "\n");
        return LDepth < RDepth ? 1 : -1;
      }
    }
    if (left->Latency != right->Latency) {
      DEBUG(++FactorCount[FactOther]);
      return left->Latency > right->Latency ? 1 : -1;
    }
  }
  return 0;
}

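// For illustration (hypothetical cycle counts): at CurCycle 5, a left node of
// height 7 stalls while a right node of height 4 does not, so the function
// returns 1 and the right node is preferred, hiding the two-cycle gap before
// the left node becomes profitable to issue.
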
static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
  // Schedule physical register definitions close to their use. This is
  // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
  // long as shortening physreg live ranges is generally good, we can defer
  // creating a subtarget hook.
  if (!DisableSchedPhysRegJoin) {
    bool LHasPhysReg = left->hasPhysRegDefs;
    bool RHasPhysReg = right->hasPhysRegDefs;
    if (LHasPhysReg != RHasPhysReg) {
      DEBUG(++FactorCount[FactRegUses]);
#ifndef NDEBUG
      const char *PhysRegMsg[] = {" has no physreg", " defines a physreg"};
#endif
      DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
            << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
            << PhysRegMsg[RHasPhysReg] << "\n");
      return LHasPhysReg < RHasPhysReg;
    }
  }

  // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
  unsigned LPriority = SPQ->getNodePriority(left);
  unsigned RPriority = SPQ->getNodePriority(right);

  // Be really careful about hoisting call operands above previous calls.
  // Only allow it if it would reduce register pressure.
  if (left->isCall && right->isCallOp) {
    unsigned RNumVals = right->getNode()->getNumValues();
    RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
  }
  if (right->isCall && left->isCallOp) {
    unsigned LNumVals = left->getNode()->getNumValues();
    LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
  }

  if (LPriority != RPriority) {
    DEBUG(++FactorCount[FactStatic]);
    return LPriority > RPriority;
  }

  // If one or both of the nodes are calls and their Sethi-Ullman numbers are
  // the same, then keep source order.
  if (left->isCall || right->isCall) {
    unsigned LOrder = SPQ->getNodeOrdering(left);
    unsigned ROrder = SPQ->getNodeOrdering(right);

    // Prefer an ordering where the lower the non-zero order number, the higher
    // the preference.
    if ((LOrder || ROrder) && LOrder != ROrder)
      return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
  }

  // Try to schedule def + use closer when Sethi-Ullman numbers are the same.
  // e.g.
  // t1 = op t2, c1
  // t3 = op t4, c2
  //
  // and the following instructions are both ready.
  // t2 = op c3
  // t4 = op c4
  //
  // Then schedule t2 = op first.
  // i.e.
  // t4 = op c4
  // t1 = op t2, c1
  // t3 = op t4, c2
  // t2 = op c3
  //
  // This creates more short live intervals.
  unsigned LDist = closestSucc(left);
  unsigned RDist = closestSucc(right);
  if (LDist != RDist) {
    DEBUG(++FactorCount[FactOther]);
    return LDist < RDist;
  }

  // How many registers become live when the node is scheduled.
  unsigned LScratch = calcMaxScratches(left);
  unsigned RScratch = calcMaxScratches(right);
  if (LScratch != RScratch) {
    DEBUG(++FactorCount[FactOther]);
    return LScratch > RScratch;
  }

  // Comparing latency against a call makes little sense unless the node
  // is register pressure-neutral.
  if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
    return (left->NodeQueueId > right->NodeQueueId);

  // Do not compare latencies when one or both of the nodes are calls.
  if (!DisableSchedCycles &&
      !(left->isCall || right->isCall)) {
    int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  else {
    if (left->getHeight() != right->getHeight()) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }

    if (left->getDepth() != right->getDepth()) {
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  assert(left->NodeQueueId && right->NodeQueueId &&
         "NodeQueueId cannot be zero");
  DEBUG(++FactorCount[FactOther]);
  return (left->NodeQueueId > right->NodeQueueId);
}

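// In summary, the tie-break chain above is: physreg def affinity, then
// (call-adjusted) Sethi-Ullman priority, then source order for calls, then
// distance to the closest successor, then the scratch-register estimate, then
// latency (or raw height/depth when cycle-level comparison is unavailable),
// and finally queue order as the deterministic last resort.
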
// Bottom up
bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  return BURRSort(left, right, SPQ);
}

// Source order, otherwise bottom up.
bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  unsigned LOrder = SPQ->getNodeOrdering(left);
  unsigned ROrder = SPQ->getNodeOrdering(right);

  // Prefer an ordering where the lower the non-zero order number, the higher
  // the preference.
  if ((LOrder || ROrder) && LOrder != ROrder)
    return LOrder != 0 && (LOrder < ROrder || ROrder == 0);

  return BURRSort(left, right, SPQ);
}

// If the time between now and when the instruction will be ready can cover
// the spill code, then avoid adding it to the ready queue. This gives long
// stalls highest priority and allows hoisting across calls. It should also
// speed up processing the available queue.
bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  static const unsigned ReadyDelay = 3;

  if (SPQ->MayReduceRegPressure(SU)) return true;

  if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

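// E.g. with ReadyDelay = 3 and a hypothetical CurCycle of 10, an SU of height
// 14 is kept out of the available queue (14 > 10 + 3), while one of height 13
// is admitted as long as the hazard recognizer reports no hazard three cycles
// ahead.
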
// Return true if right should be scheduled with higher priority than left.
bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  bool LHigh = SPQ->HighRegPressure(left);
  bool RHigh = SPQ->HighRegPressure(right);
  // Avoid causing spills. If register pressure is high, schedule for
  // register pressure reduction.
  if (LHigh && !RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
          << right->NodeNum << ")\n");
    return true;
  }
  else if (!LHigh && RHigh) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
          << left->NodeNum << ")\n");
    return false;
  }
  if (!LHigh && !RHigh) {
    int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
    if (result != 0)
      return result > 0;
  }
  return BURRSort(left, right, SPQ);
}

// Schedule as many instructions in each cycle as possible. So don't make an
// instruction available unless it is ready in the current cycle.
bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
  if (SU->getHeight() > CurCycle) return false;

  if (SPQ->getHazardRec()->getHazardType(SU, 0)
      != ScheduleHazardRecognizer::NoHazard)
    return false;

  return true;
}

static bool canEnableCoalescing(SUnit *SU) {
  unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
  if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
    // CopyToReg should be close to its uses to facilitate coalescing and
    // avoid spilling.
    return true;

  if (Opc == TargetOpcode::EXTRACT_SUBREG ||
      Opc == TargetOpcode::SUBREG_TO_REG ||
      Opc == TargetOpcode::INSERT_SUBREG)
    // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
    // close to their uses to facilitate coalescing.
    return true;

  if (SU->NumPreds == 0 && SU->NumSuccs != 0)
    // If SU does not have a register def, schedule it close to its uses
    // because it does not lengthen any live ranges.
    return true;

  return false;
}

// list-ilp is currently an experimental scheduler that allows various
// heuristics to be enabled prior to the normal register reduction logic.
bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
  if (int res = checkSpecialNodes(left, right))
    return res > 0;

  if (left->isCall || right->isCall)
    // No way to compute latency of calls.
    return BURRSort(left, right, SPQ);

  unsigned LLiveUses = 0, RLiveUses = 0;
  int LPDiff = 0, RPDiff = 0;
  if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
    LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
    RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
  }
  if (!DisableSchedRegPressure && LPDiff != RPDiff) {
    DEBUG(++FactorCount[FactPressureDiff]);
    DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
          << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
    return LPDiff > RPDiff;
  }

  if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
    bool LReduce = canEnableCoalescing(left);
    bool RReduce = canEnableCoalescing(right);
    DEBUG(if (LReduce != RReduce) ++FactorCount[FactPressureDiff]);
    if (LReduce && !RReduce) return false;
    if (RReduce && !LReduce) return true;
  }

  if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
    DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
          << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
    DEBUG(++FactorCount[FactRegUses]);
    return LLiveUses < RLiveUses;
  }

  if (!DisableSchedStalls) {
    bool LStall = BUHasStall(left, left->getHeight(), SPQ);
    bool RStall = BUHasStall(right, right->getHeight(), SPQ);
    if (LStall != RStall) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  if (!DisableSchedCriticalPath) {
    int spread = (int)left->getDepth() - (int)right->getDepth();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
            << left->getDepth() << " != SU(" << right->NodeNum << "): "
            << right->getDepth() << "\n");
      DEBUG(++FactorCount[FactDepth]);
      return left->getDepth() < right->getDepth();
    }
  }

  if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
    int spread = (int)left->getHeight() - (int)right->getHeight();
    if (std::abs(spread) > MaxReorderWindow) {
      DEBUG(++FactorCount[FactHeight]);
      return left->getHeight() > right->getHeight();
    }
  }

  return BURRSort(left, right, SPQ);
}

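// E.g. if MaxReorderWindow were 6 (illustrative value), depths of 10 vs 3
// exceed the window, so the critical-path check above decides the comparison
// on depth alone; a spread of 4 instead falls through to the height check and
// ultimately to BURRSort.
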
void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
  SUnits = &sunits;
  // Add pseudo dependency edges for two-address nodes.
  AddPseudoTwoAddrDeps();
  // Reroute edges to nodes with multiple uses.
  if (!TracksRegPressure)
    PrescheduleNodesWithMultipleUses();
  // Calculate node priorities.
  CalculateSethiUllmanNumbers();

  // For single block loops, mark nodes that look like canonical IV increments.
  if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
    for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
      initVRegCycle(&sunits[i]);
    }
  }
}

//===----------------------------------------------------------------------===//
//                    Preschedule for Register Pressure
//===----------------------------------------------------------------------===//

bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
  if (SU->isTwoAddress) {
    unsigned Opc = SU->getNode()->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned i = 0; i != NumOps; ++i) {
      if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
        SDNode *DU = SU->getNode()->getOperand(i).getNode();
        if (DU->getNodeId() != -1 &&
            Op->OrigNode == &(*SUnits)[DU->getNodeId()])
          return true;
      }
    }
  }
  return false;
}

/// canClobberReachingPhysRegUse - True if SU would clobber one of its
/// successor's explicit physregs whose definition can reach DepSU.
/// i.e. DepSU should not be scheduled above SU.
static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
                                         ScheduleDAGRRList *scheduleDAG,
                                         const TargetInstrInfo *TII,
                                         const TargetRegisterInfo *TRI) {
  const unsigned *ImpDefs
    = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
  if (!ImpDefs)
    return false;

  for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
       SI != SE; ++SI) {
    SUnit *SuccSU = SI->getSUnit();
    for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
           PE = SuccSU->Preds.end(); PI != PE; ++PI) {
      if (!PI->isAssignedRegDep())
        continue;

      for (const unsigned *ImpDef = ImpDefs; *ImpDef; ++ImpDef) {
        // Return true if SU clobbers this physical register use and the
        // definition of the register reaches from DepSU. IsReachable queries a
        // topological forward sort of the DAG (following the successors).
        if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
            scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
          return true;
      }
    }
  }
  return false;
}

/// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
/// physical register defs.
static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
                                  const TargetInstrInfo *TII,
                                  const TargetRegisterInfo *TRI) {
  SDNode *N = SuccSU->getNode();
  unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
  const unsigned *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
  assert(ImpDefs && "Caller should check hasPhysRegDefs");
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (!SUNode->isMachineOpcode())
      continue;
    const unsigned *SUImpDefs =
      TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
    if (!SUImpDefs)
      return false;
    for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
      EVT VT = N->getValueType(i);
      if (VT == MVT::Glue || VT == MVT::Other)
        continue;
      if (!N->hasAnyUseOfValue(i))
        continue;
      unsigned Reg = ImpDefs[i - NumDefs];
      for (;*SUImpDefs; ++SUImpDefs) {
        unsigned SUReg = *SUImpDefs;
        if (TRI->regsOverlap(Reg, SUReg))
          return true;
      }
    }
  }
  return false;
}

/// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
/// are not handled well by the general register pressure reduction
/// heuristics. When presented with code like this:
///
///      N N
///    / |
///   /  |
///  U  store
///      |
///    ...
///
/// the heuristics tend to push the store up, but since the
/// operand of the store has another use (U), this would increase
/// the length of that other use (the U->N edge).
///
/// This function transforms code like the above to route U's
/// dependence through the store when possible, like this:
///
///      N
///      ||
///      ||
///     store
///       |
///       U
///       |
///    ...
///
/// This results in the store being scheduled immediately
/// after N, which shortens the U->N live range, reducing
/// register pressure.
void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
  // Visit all the nodes in topological order, working top-down.
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    // For now, only look at nodes with no data successors, such as stores.
    // These are especially important, due to the heuristics in
    // getNodePriority for nodes with no data successors.
    if (SU->NumSuccs != 0)
      continue;
    // For now, only look at nodes with exactly one data predecessor.
    if (SU->NumPreds != 1)
      continue;
    // Avoid prescheduling copies to virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyToReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Locate the single data predecessor.
    SUnit *PredSU = 0;
    for (SUnit::const_pred_iterator II = SU->Preds.begin(),
         EE = SU->Preds.end(); II != EE; ++II)
      if (!II->isCtrl()) {
        PredSU = II->getSUnit();
        break;
      }
    assert(PredSU);

    // Don't rewrite edges that carry physregs, because that requires additional
    // support infrastructure.
    if (PredSU->hasPhysRegDefs)
      continue;
    // Short-circuit the case where SU is PredSU's only data successor.
    if (PredSU->NumSuccs == 1)
      continue;
    // Avoid prescheduling to copies from virtual registers, which don't behave
    // like other nodes from the perspective of scheduling heuristics.
    if (SDNode *N = SU->getNode())
      if (N->getOpcode() == ISD::CopyFromReg &&
          TargetRegisterInfo::isVirtualRegister
            (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
        continue;

    // Perform checks on the successors of PredSU.
    for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
         EE = PredSU->Succs.end(); II != EE; ++II) {
      SUnit *PredSuccSU = II->getSUnit();
      if (PredSuccSU == SU) continue;
      // If PredSU has another successor with no data successors, for
      // now don't attempt to choose either over the other.
      if (PredSuccSU->NumSuccs == 0)
        goto outer_loop_continue;
      // Don't break physical register dependencies.
      if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
        if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
          goto outer_loop_continue;
      // Don't introduce graph cycles.
      if (scheduleDAG->IsReachable(SU, PredSuccSU))
        goto outer_loop_continue;
    }

    // Ok, the transformation is safe and the heuristics suggest it is
    // profitable. Update the graph.
    DEBUG(dbgs() << "    Prescheduling SU #" << SU->NodeNum
                 << " next to PredSU #" << PredSU->NodeNum
                 << " to guide scheduling in the presence of multiple uses\n");
    for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
      SDep Edge = PredSU->Succs[i];
      assert(!Edge.isAssignedRegDep());
      SUnit *SuccSU = Edge.getSUnit();
      if (SuccSU != SU) {
        Edge.setSUnit(PredSU);
        scheduleDAG->RemovePred(SuccSU, Edge);
        scheduleDAG->AddPred(SU, Edge);
        Edge.setSUnit(SU);
        scheduleDAG->AddPred(SuccSU, Edge);
        --i;
      }
    }
  outer_loop_continue:;
  }
}

/// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
/// it as a def&use operand, add a pseudo control edge from it to the other
/// node (if it won't create a cycle) so the two-address one will be scheduled
/// first (lower in the schedule). If both nodes are two-address, favor the
/// one that has a CopyToReg use (more likely to be a loop induction update).
/// If both are two-address, but one is commutable while the other is not
/// commutable, favor the one that's not commutable.
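//
// For illustration (hypothetical two-address machine code):
//
//   t1 = ADD t0, x    // two-address: the def t1 is tied to the use t0
//   t2 = SUB t0, y    // another reader of t0
//
// A pseudo control edge SUB -> ADD keeps the SUB above the ADD, so t0's other
// use is scheduled before the tied operand is overwritten and no extra copy
// of t0 is needed.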
void RegReductionPQBase::AddPseudoTwoAddrDeps() {
  for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
    SUnit *SU = &(*SUnits)[i];
    if (!SU->isTwoAddress)
      continue;

    SDNode *Node = SU->getNode();
    if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
      continue;

    bool isLiveOut = hasOnlyLiveOutUses(SU);
    unsigned Opc = Node->getMachineOpcode();
    const MCInstrDesc &MCID = TII->get(Opc);
    unsigned NumRes = MCID.getNumDefs();
    unsigned NumOps = MCID.getNumOperands() - NumRes;
    for (unsigned j = 0; j != NumOps; ++j) {
      if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
        continue;
      SDNode *DU = SU->getNode()->getOperand(j).getNode();
      if (DU->getNodeId() == -1)
        continue;
      const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
      if (!DUSU) continue;
      for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
           E = DUSU->Succs.end(); I != E; ++I) {
        if (I->isCtrl()) continue;
        SUnit *SuccSU = I->getSUnit();
        if (SuccSU == SU)
          continue;
        // Be conservative. Ignore if nodes aren't at roughly the same
        // depth and height.
        if (SuccSU->getHeight() < SU->getHeight() &&
            (SU->getHeight() - SuccSU->getHeight()) > 1)
          continue;
        // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
        // constrains whatever is using the copy, instead of the copy
        // itself. In the case that the copy is coalesced, this
        // preserves the intent of the pseudo two-address heuristics.
        while (SuccSU->Succs.size() == 1 &&
               SuccSU->getNode()->isMachineOpcode() &&
               SuccSU->getNode()->getMachineOpcode() ==
                 TargetOpcode::COPY_TO_REGCLASS)
          SuccSU = SuccSU->Succs.front().getSUnit();
        // Don't constrain non-instruction nodes.
        if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
          continue;
        // Don't constrain nodes with physical register defs if the
        // predecessor can clobber them.
        if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
          if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
            continue;
        }
        // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
        // these may be coalesced away. We want them close to their uses.
        unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
        if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
            SuccOpc == TargetOpcode::INSERT_SUBREG ||
            SuccOpc == TargetOpcode::SUBREG_TO_REG)
          continue;
        if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
            (!canClobber(SuccSU, DUSU) ||
             (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
             (!SU->isCommutable && SuccSU->isCommutable)) &&
            !scheduleDAG->IsReachable(SuccSU, SU)) {
          DEBUG(dbgs() << "    Adding a pseudo-two-addr edge from SU #"
                << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
          scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Order, /*Latency=*/0,
                                        /*Reg=*/0, /*isNormalMemory=*/false,
                                        /*isMustAlias=*/false,
                                        /*isArtificial=*/true));
        }
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

llvm::ScheduleDAGSDNodes *
llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
                                 CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  BURegReductionPriorityQueue *PQ =
    new BURegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();

  SrcRegReductionPriorityQueue *PQ =
    new SrcRegReductionPriorityQueue(*IS->MF, false, TII, TRI, 0);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
                                   CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  HybridBURRPriorityQueue *PQ =
    new HybridBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);

  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}

llvm::ScheduleDAGSDNodes *
llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
                                CodeGenOpt::Level OptLevel) {
  const TargetMachine &TM = IS->TM;
  const TargetInstrInfo *TII = TM.getInstrInfo();
  const TargetRegisterInfo *TRI = TM.getRegisterInfo();
  const TargetLowering *TLI = &IS->getTargetLowering();

  ILPBURRPriorityQueue *PQ =
    new ILPBURRPriorityQueue(*IS->MF, true, TII, TRI, TLI);
  ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
  PQ->setScheduleDAG(SD);
  return SD;
}