//===----- ScheduleDAGRRList.cpp - Reg pressure reduction list scheduler --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements bottom-up register pressure reduction list schedulers,
// using standard algorithms. The basic approach uses a priority queue of
// available nodes to schedule. One at a time, nodes are taken from the
// priority queue (thus in priority order), checked for legality to
// schedule, and emitted if legal.
//
//===----------------------------------------------------------------------===//
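//
// In outline, the bottom-up list-scheduling loop looks like this
// (illustrative sketch only, not part of the build; ListScheduleBottomUp
// below is the real loop, which additionally handles pipeline stalls and
// live physical register interferences):
//
//   while (!Available.empty()) {
//     SUnit *SU = Available.pop();   // highest-priority node first
//     if (!legalToSchedule(SU))      // e.g. live physreg interference
//       continue;                    // (node is delayed or re-queued)
//     emit(SU);                      // append to the schedule
//     releasePredecessors(SU);       // may make new nodes available
//   }
//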
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "ScheduleDAGSDNodes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtargetInfo.h"
#include <climits>
using namespace llvm;

#define DEBUG_TYPE "pre-RA-sched"
STATISTIC(NumBacktracks, "Number of times scheduler backtracked");
STATISTIC(NumUnfolds,    "Number of nodes unfolded");
STATISTIC(NumDups,       "Number of duplicated nodes");
STATISTIC(NumPRCopies,   "Number of physical register copies");
static RegisterScheduler
  burrListDAGScheduler("list-burr",
                       "Bottom-up register reduction list scheduling",
                       createBURRListDAGScheduler);
static RegisterScheduler
  sourceListDAGScheduler("source",
                         "Similar to list-burr but schedules in source "
                         "order when possible",
                         createSourceListDAGScheduler);

static RegisterScheduler
  hybridListDAGScheduler("list-hybrid",
                         "Bottom-up register pressure aware list scheduling "
                         "which tries to balance latency and register pressure",
                         createHybridListDAGScheduler);

static RegisterScheduler
  ILPListDAGScheduler("list-ilp",
                      "Bottom-up register pressure aware list scheduling "
                      "which tries to balance ILP and register pressure",
                      createILPListDAGScheduler);
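
// Any of the schedulers registered above can be selected from the command
// line through llc's -pre-RA-sched option, e.g. "llc -pre-RA-sched=list-ilp"
// (illustrative invocation; by default the target selects its preferred
// scheduler through the SchedulerRegistry).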
static cl::opt<bool> DisableSchedCycles(
  "disable-sched-cycles", cl::Hidden, cl::init(false),
  cl::desc("Disable cycle-level precision during preRA scheduling"));

// Temporary sched=list-ilp flags until the heuristics are robust.
// Some options are also available under sched=list-hybrid.
static cl::opt<bool> DisableSchedRegPressure(
  "disable-sched-reg-pressure", cl::Hidden, cl::init(false),
  cl::desc("Disable regpressure priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedLiveUses(
  "disable-sched-live-uses", cl::Hidden, cl::init(true),
  cl::desc("Disable live use priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedVRegCycle(
  "disable-sched-vrcycle", cl::Hidden, cl::init(false),
  cl::desc("Disable virtual register cycle interference checks"));
static cl::opt<bool> DisableSchedPhysRegJoin(
  "disable-sched-physreg-join", cl::Hidden, cl::init(false),
  cl::desc("Disable physreg def-use affinity"));
static cl::opt<bool> DisableSchedStalls(
  "disable-sched-stalls", cl::Hidden, cl::init(true),
  cl::desc("Disable no-stall priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedCriticalPath(
  "disable-sched-critical-path", cl::Hidden, cl::init(false),
  cl::desc("Disable critical path priority in sched=list-ilp"));
static cl::opt<bool> DisableSchedHeight(
  "disable-sched-height", cl::Hidden, cl::init(false),
  cl::desc("Disable scheduled-height priority in sched=list-ilp"));
static cl::opt<bool> Disable2AddrHack(
  "disable-2addr-hack", cl::Hidden, cl::init(true),
  cl::desc("Disable scheduler's two-address hack"));

static cl::opt<int> MaxReorderWindow(
  "max-sched-reorder", cl::Hidden, cl::init(6),
  cl::desc("Number of instructions to allow ahead of the critical path "
           "in sched=list-ilp"));

static cl::opt<unsigned> AvgIPC(
  "sched-avg-ipc", cl::Hidden, cl::init(1),
  cl::desc("Average inst/cycle when no target itinerary exists."));
namespace {
//===----------------------------------------------------------------------===//
/// ScheduleDAGRRList - The actual register reduction list scheduler
/// implementation. This supports bottom-up scheduling.
///
class ScheduleDAGRRList : public ScheduleDAGSDNodes {
private:
  /// NeedLatency - True if the scheduler will make use of latency information.
  ///
  bool NeedLatency;

  /// AvailableQueue - The priority queue to use for the available SUnits.
  SchedulingPriorityQueue *AvailableQueue;

  /// PendingQueue - This contains all of the instructions whose operands have
  /// been issued, but their results are not ready yet (due to the latency of
  /// the operation). Once the operands become available, the instruction is
  /// added to the AvailableQueue.
  std::vector<SUnit*> PendingQueue;

  /// HazardRec - The hazard recognizer to use.
  ScheduleHazardRecognizer *HazardRec;

  /// CurCycle - The current scheduler state corresponds to this cycle.
  unsigned CurCycle;

  /// MinAvailableCycle - Cycle of the soonest available instruction.
  unsigned MinAvailableCycle;

  /// IssueCount - Count instructions issued in this cycle.
  /// Currently valid only for bottom-up scheduling.
  unsigned IssueCount;

  /// LiveRegDefs - A set of physical registers and their definitions
  /// that are "live". These nodes must be scheduled before any other nodes
  /// that modify the registers can be scheduled.
  unsigned NumLiveRegs;
  std::vector<SUnit*> LiveRegDefs;
  std::vector<SUnit*> LiveRegGens;

  // Collect interferences between physical register use/defs.
  // Each interference is an SUnit and set of physical registers.
  SmallVector<SUnit*, 4> Interferences;
  typedef DenseMap<SUnit*, SmallVector<unsigned, 4> > LRegsMapT;
  LRegsMapT LRegsMap;

  /// Topo - A topological ordering for SUnits which permits fast IsReachable
  /// and similar queries.
  ScheduleDAGTopologicalSort Topo;

  // Hack to keep track of the inverse of FindCallSeqStart without more crazy
  // DAG crawling.
  DenseMap<SUnit*, SUnit*> CallSeqEndForStart;

public:
  ScheduleDAGRRList(MachineFunction &mf, bool needlatency,
                    SchedulingPriorityQueue *availqueue,
                    CodeGenOpt::Level OptLevel)
    : ScheduleDAGSDNodes(mf),
      NeedLatency(needlatency), AvailableQueue(availqueue), CurCycle(0),
      Topo(SUnits, nullptr) {

    const TargetMachine &tm = mf.getTarget();
    if (DisableSchedCycles || !NeedLatency)
      HazardRec = new ScheduleHazardRecognizer();
    else
      HazardRec =
        tm.getSubtargetImpl()->getInstrInfo()->CreateTargetHazardRecognizer(
          tm.getSubtargetImpl(), this);
  }

  ~ScheduleDAGRRList() {
    delete HazardRec;
    delete AvailableQueue;
  }
  void Schedule() override;

  ScheduleHazardRecognizer *getHazardRec() { return HazardRec; }

  /// IsReachable - Checks if SU is reachable from TargetSU.
  bool IsReachable(const SUnit *SU, const SUnit *TargetSU) {
    return Topo.IsReachable(SU, TargetSU);
  }

  /// WillCreateCycle - Returns true if adding an edge from SU to TargetSU will
  /// create a cycle.
  bool WillCreateCycle(SUnit *SU, SUnit *TargetSU) {
    return Topo.WillCreateCycle(SU, TargetSU);
  }

  /// AddPred - adds a predecessor edge to SUnit SU.
  /// Updates the topological ordering if required.
  void AddPred(SUnit *SU, const SDep &D) {
    Topo.AddPred(SU, D.getSUnit());
    SU->addPred(D);
  }

  /// RemovePred - removes a predecessor edge from SUnit SU.
  /// Updates the topological ordering if required.
  void RemovePred(SUnit *SU, const SDep &D) {
    Topo.RemovePred(SU, D.getSUnit());
    SU->removePred(D);
  }

private:
  bool isReady(SUnit *SU) {
    return DisableSchedCycles || !AvailableQueue->hasReadyFilter() ||
      AvailableQueue->isReady(SU);
  }

  void ReleasePred(SUnit *SU, const SDep *PredEdge);
  void ReleasePredecessors(SUnit *SU);
  void ReleasePending();
  void AdvanceToCycle(unsigned NextCycle);
  void AdvancePastStalls(SUnit *SU);
  void EmitNode(SUnit *SU);
  void ScheduleNodeBottomUp(SUnit*);
  void CapturePred(SDep *PredEdge);
  void UnscheduleNodeBottomUp(SUnit*);
  void RestoreHazardCheckerBottomUp();
  void BacktrackBottomUp(SUnit*, SUnit*);
  SUnit *CopyAndMoveSuccessors(SUnit*);
  void InsertCopiesAndMoveSuccs(SUnit*, unsigned,
                                const TargetRegisterClass*,
                                const TargetRegisterClass*,
                                SmallVectorImpl<SUnit*>&);
  bool DelayForLiveRegsBottomUp(SUnit*, SmallVectorImpl<unsigned>&);

  void releaseInterferences(unsigned Reg = 0);

  SUnit *PickNodeToScheduleBottomUp();
  void ListScheduleBottomUp();

  /// CreateNewSUnit - Creates a new SUnit and returns a pointer to it.
  /// Updates the topological ordering if required.
  SUnit *CreateNewSUnit(SDNode *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = newSUnit(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// CreateClone - Creates a new SUnit from an existing one.
  /// Updates the topological ordering if required.
  SUnit *CreateClone(SUnit *N) {
    unsigned NumSUnits = SUnits.size();
    SUnit *NewNode = Clone(N);
    // Update the topological ordering.
    if (NewNode->NodeNum >= NumSUnits)
      Topo.InitDAGTopologicalSorting();
    return NewNode;
  }

  /// forceUnitLatencies - Register-pressure-reducing scheduling doesn't
  /// need actual latency information but the hybrid scheduler does.
  bool forceUnitLatencies() const override {
    return !NeedLatency;
  }
};
}  // end anonymous namespace
/// GetCostForDef - Looks up the register class and cost for a given definition.
/// Typically this just means looking up the representative register class,
/// but for untyped values (MVT::Untyped) it means inspecting the node's
/// opcode to determine what register class is being generated.
static void GetCostForDef(const ScheduleDAGSDNodes::RegDefIter &RegDefPos,
                          const TargetLowering *TLI,
                          const TargetInstrInfo *TII,
                          const TargetRegisterInfo *TRI,
                          unsigned &RegClass, unsigned &Cost,
                          const MachineFunction &MF) {
  MVT VT = RegDefPos.GetValue();

  // Special handling for untyped values. These values can only come from
  // the expansion of custom DAG-to-DAG patterns.
  if (VT == MVT::Untyped) {
    const SDNode *Node = RegDefPos.GetNode();

    // Special handling for CopyFromReg of untyped values.
    if (!Node->isMachineOpcode() && Node->getOpcode() == ISD::CopyFromReg) {
      unsigned Reg = cast<RegisterSDNode>(Node->getOperand(1))->getReg();
      const TargetRegisterClass *RC = MF.getRegInfo().getRegClass(Reg);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Opcode = Node->getMachineOpcode();
    if (Opcode == TargetOpcode::REG_SEQUENCE) {
      unsigned DstRCIdx = cast<ConstantSDNode>(Node->getOperand(0))->getZExtValue();
      const TargetRegisterClass *RC = TRI->getRegClass(DstRCIdx);
      RegClass = RC->getID();
      Cost = 1;
      return;
    }

    unsigned Idx = RegDefPos.GetIdx();
    const MCInstrDesc &Desc = TII->get(Opcode);
    const TargetRegisterClass *RC = TII->getRegClass(Desc, Idx, TRI, MF);
    RegClass = RC->getID();
    // FIXME: Cost arbitrarily set to 1 because there doesn't seem to be a
    // better way to determine it.
    Cost = 1;
  } else {
    RegClass = TLI->getRepRegClassFor(VT)->getID();
    Cost = TLI->getRepRegClassCostFor(VT);
  }
}
/// Schedule - Schedule the DAG using list scheduling.
void ScheduleDAGRRList::Schedule() {
  DEBUG(dbgs()
        << "********** List Scheduling BB#" << BB->getNumber()
        << " '" << BB->getName() << "' **********\n");

  CurCycle = 0;
  IssueCount = 0;
  MinAvailableCycle = DisableSchedCycles ? 0 : UINT_MAX;
  NumLiveRegs = 0;
  // Allocate slots for each physical register, plus one for a special register
  // to track the virtual resource of a calling sequence.
  LiveRegDefs.resize(TRI->getNumRegs() + 1, nullptr);
  LiveRegGens.resize(TRI->getNumRegs() + 1, nullptr);
  CallSeqEndForStart.clear();
  assert(Interferences.empty() && LRegsMap.empty() && "stale Interferences");

  // Build the scheduling graph.
  BuildSchedGraph(nullptr);

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));
  Topo.InitDAGTopologicalSorting();

  AvailableQueue->initNodes(SUnits);

  HazardRec->Reset();

  // Execute the actual scheduling loop.
  ListScheduleBottomUp();

  AvailableQueue->releaseState();

  DEBUG({
      dbgs() << "*** Final schedule ***\n";
      dumpSchedule();
      dbgs() << '\n';
    });
}
//===----------------------------------------------------------------------===//
//  Bottom-Up Scheduling
//===----------------------------------------------------------------------===//
/// ReleasePred - Decrement the NumSuccsLeft count of a predecessor. Add it to
/// the AvailableQueue if the count reaches zero. Also update its cycle bound.
void ScheduleDAGRRList::ReleasePred(SUnit *SU, const SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();

#ifndef NDEBUG
  if (PredSU->NumSuccsLeft == 0) {
    dbgs() << "*** Scheduling failed! ***\n";
    PredSU->dump(this);
    dbgs() << " has been released too many times!\n";
    llvm_unreachable(nullptr);
  }
#endif
  --PredSU->NumSuccsLeft;

  if (!forceUnitLatencies()) {
    // Updating predecessor's height. This is now the cycle when the
    // predecessor can be scheduled without causing a pipeline stall.
    PredSU->setHeightToAtLeast(SU->getHeight() + PredEdge->getLatency());
  }

  // If all the node's successors are scheduled, this node is ready
  // to be scheduled. Ignore the special EntrySU node.
  if (PredSU->NumSuccsLeft == 0 && PredSU != &EntrySU) {
    PredSU->isAvailable = true;

    unsigned Height = PredSU->getHeight();
    if (Height < MinAvailableCycle)
      MinAvailableCycle = Height;

    if (isReady(PredSU)) {
      AvailableQueue->push(PredSU);
    }
    // CapturePred and others may have left the node in the pending queue, avoid
    // adding it twice.
    else if (!PredSU->isPending) {
      PredSU->isPending = true;
      PendingQueue.push_back(PredSU);
    }
  }
}
/// IsChainDependent - Test if Outer is reachable from Inner through
/// chain dependencies.
static bool IsChainDependent(SDNode *Outer, SDNode *Inner,
                             unsigned NestLevel,
                             const TargetInstrInfo *TII) {
  SDNode *N = Outer;
  for (;;) {
    if (N == Inner)
      return true;
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
        if (IsChainDependent(N->getOperand(i).getNode(), Inner, NestLevel, TII))
          return true;
      return false;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        if (NestLevel == 0)
          return false;
        --NestLevel;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return false;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return false;
  }
}
/// FindCallSeqStart - Starting from the (lowered) CALLSEQ_END node, locate
/// the corresponding (lowered) CALLSEQ_BEGIN node.
///
/// NestLevel and MaxNest are used in recursion to indicate the current level
/// of nesting of CALLSEQ_BEGIN and CALLSEQ_END pairs, as well as the maximum
/// level seen so far.
///
/// TODO: It would be better to give CALLSEQ_END an explicit operand to point
/// to the corresponding CALLSEQ_BEGIN to avoid needing to search for it.
static SDNode *
FindCallSeqStart(SDNode *N, unsigned &NestLevel, unsigned &MaxNest,
                 const TargetInstrInfo *TII) {
  for (;;) {
    // For a TokenFactor, examine each operand. There may be multiple ways
    // to get to the CALLSEQ_BEGIN, but we need to find the path with the
    // most nesting in order to ensure that we find the corresponding match.
    if (N->getOpcode() == ISD::TokenFactor) {
      SDNode *Best = nullptr;
      unsigned BestMaxNest = MaxNest;
      for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
        unsigned MyNestLevel = NestLevel;
        unsigned MyMaxNest = MaxNest;
        if (SDNode *New = FindCallSeqStart(N->getOperand(i).getNode(),
                                           MyNestLevel, MyMaxNest, TII))
          if (!Best || (MyMaxNest > BestMaxNest)) {
            Best = New;
            BestMaxNest = MyMaxNest;
          }
      }
      assert(Best);
      MaxNest = BestMaxNest;
      return Best;
    }
    // Check for a lowered CALLSEQ_BEGIN or CALLSEQ_END.
    if (N->isMachineOpcode()) {
      if (N->getMachineOpcode() ==
          (unsigned)TII->getCallFrameDestroyOpcode()) {
        ++NestLevel;
        MaxNest = std::max(MaxNest, NestLevel);
      } else if (N->getMachineOpcode() ==
                 (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NestLevel != 0);
        --NestLevel;
        if (NestLevel == 0)
          return N;
      }
    }
    // Otherwise, find the chain and continue climbing.
    for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
      if (N->getOperand(i).getValueType() == MVT::Other) {
        N = N->getOperand(i).getNode();
        goto found_chain_operand;
      }
    return nullptr;
  found_chain_operand:;
    if (N->getOpcode() == ISD::EntryToken)
      return nullptr;
  }
}
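
// For example (illustrative), given nested lowered call sequences:
//
//   CALLSEQ_BEGIN (outer)   <-- returned for the outer CALLSEQ_END
//     CALLSEQ_BEGIN (inner)
//     CALLSEQ_END   (inner)
//   CALLSEQ_END   (outer)   <-- the search starts here
//
// the walk climbs the chain bottom-up; each CALLSEQ_END raises NestLevel and
// each CALLSEQ_BEGIN lowers it, so the search stops only at the BEGIN that
// matches the END it started from (when NestLevel reaches 0).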
/// Call ReleasePred for each predecessor, then update register live def/gen.
/// Always update LiveRegDefs for a register dependence even if the current SU
/// also defines the register. This effectively creates one large live range
/// across a sequence of two-address nodes. This is important because the
/// entire chain must be scheduled together. Example:
///
/// flags = (3) add
/// flags = (2) addc flags
/// flags = (1) addc flags
///
/// results in
///
/// LiveRegDefs[flags] = 3
/// LiveRegGens[flags] = 1
///
/// If (2) addc is unscheduled, then (1) addc must also be unscheduled to avoid
/// interference on flags.
void ScheduleDAGRRList::ReleasePredecessors(SUnit *SU) {
  // Bottom up: release predecessors
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    ReleasePred(SU, &*I);
    if (I->isAssignedRegDep()) {
      // This is a physical register dependency and it's impossible or
      // expensive to copy the register. Make sure nothing that can
      // clobber the register is scheduled between the predecessor and
      // this node.
      SUnit *RegDef = LiveRegDefs[I->getReg()]; (void)RegDef;
      assert((!RegDef || RegDef == SU || RegDef == I->getSUnit()) &&
             "interference on register dependence");
      LiveRegDefs[I->getReg()] = I->getSUnit();
      if (!LiveRegGens[I->getReg()]) {
        ++NumLiveRegs;
        LiveRegGens[I->getReg()] = SU;
      }
    }
  }

  // If we're scheduling a lowered CALLSEQ_END, find the corresponding
  // CALLSEQ_BEGIN. Inject an artificial physical register dependence between
  // these nodes, to prevent other calls from being interscheduled with them.
  unsigned CallResource = TRI->getNumRegs();
  if (!LiveRegDefs[CallResource])
    for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode())
      if (Node->isMachineOpcode() &&
          Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        unsigned NestLevel = 0;
        unsigned MaxNest = 0;
        SDNode *N = FindCallSeqStart(Node, NestLevel, MaxNest, TII);

        SUnit *Def = &SUnits[N->getNodeId()];
        CallSeqEndForStart[Def] = SU;

        ++NumLiveRegs;
        LiveRegDefs[CallResource] = Def;
        LiveRegGens[CallResource] = SU;
        break;
      }
}
/// Check to see if any of the pending instructions are ready to issue. If
/// so, add them to the available queue.
void ScheduleDAGRRList::ReleasePending() {
  if (DisableSchedCycles) {
    assert(PendingQueue.empty() && "pending instrs not allowed in this mode");
    return;
  }

  // If the available queue is empty, it is safe to reset MinAvailableCycle.
  if (AvailableQueue->empty())
    MinAvailableCycle = UINT_MAX;

  // Check to see if any of the pending instructions are ready to issue. If
  // so, add them to the available queue.
  for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
    unsigned ReadyCycle = PendingQueue[i]->getHeight();
    if (ReadyCycle < MinAvailableCycle)
      MinAvailableCycle = ReadyCycle;

    if (PendingQueue[i]->isAvailable) {
      if (!isReady(PendingQueue[i]))
        continue;
      AvailableQueue->push(PendingQueue[i]);
    }
    PendingQueue[i]->isPending = false;
    PendingQueue[i] = PendingQueue.back();
    PendingQueue.pop_back();
    --i; --e;
  }
}
/// Move the scheduler state forward by the specified number of Cycles.
void ScheduleDAGRRList::AdvanceToCycle(unsigned NextCycle) {
  if (NextCycle <= CurCycle)
    return;

  IssueCount = 0;
  AvailableQueue->setCurCycle(NextCycle);
  if (!HazardRec->isEnabled()) {
    // Bypass lots of virtual calls in case of long latency.
    CurCycle = NextCycle;
  }
  else {
    for (; CurCycle != NextCycle; ++CurCycle) {
      HazardRec->RecedeCycle();
    }
  }
  // FIXME: Instead of visiting the pending Q each time, set a dirty flag on the
  // available Q to release pending nodes at least once before popping.
  ReleasePending();
}
/// Move the scheduler state forward until the specified node's dependents are
/// ready and can be scheduled with no resource conflicts.
void ScheduleDAGRRList::AdvancePastStalls(SUnit *SU) {
  if (DisableSchedCycles)
    return;

  // FIXME: Nodes such as CopyFromReg probably should not advance the current
  // cycle. Otherwise, we can wrongly mask real stalls. If the non-machine node
  // has predecessors the cycle will be advanced when they are scheduled.
  // But given the crude nature of modeling latency through such nodes, we
  // currently need to treat these nodes like real instructions.
  // if (!SU->getNode() || !SU->getNode()->isMachineOpcode()) return;

  unsigned ReadyCycle = SU->getHeight();

  // Bump CurCycle to account for latency. We assume the latency of other
  // available instructions may be hidden by the stall (not a full pipe stall).
  // This updates the hazard recognizer's cycle before reserving resources for
  // this instruction.
  AdvanceToCycle(ReadyCycle);

  // Calls are scheduled in their preceding cycle, so don't conflict with
  // hazards from instructions after the call. EmitNode will reset the
  // scoreboard state before emitting the call.
  if (SU->isCall)
    return;

  // FIXME: For resource conflicts in very long non-pipelined stages, we
  // should probably skip ahead here to avoid useless scoreboard checks.
  int Stalls = 0;
  while (true) {
    ScheduleHazardRecognizer::HazardType HT =
      HazardRec->getHazardType(SU, -Stalls);

    if (HT == ScheduleHazardRecognizer::NoHazard)
      break;

    ++Stalls;
  }
  AdvanceToCycle(CurCycle + Stalls);
}
/// Record this SUnit in the HazardRecognizer.
/// Does not update CurCycle.
void ScheduleDAGRRList::EmitNode(SUnit *SU) {
  if (!HazardRec->isEnabled())
    return;

  // Check for phys reg copy.
  if (!SU->getNode())
    return;

  switch (SU->getNode()->getOpcode()) {
  default:
    assert(SU->getNode()->isMachineOpcode() &&
           "This target-independent node should not be scheduled.");
    break;
  case ISD::MERGE_VALUES:
  case ISD::TokenFactor:
  case ISD::LIFETIME_START:
  case ISD::LIFETIME_END:
  case ISD::CopyToReg:
  case ISD::CopyFromReg:
  case ISD::EH_LABEL:
    // Noops don't affect the scoreboard state. Copies are likely to be
    // removed.
    return;
  case ISD::INLINEASM:
    // For inline asm, clear the pipeline state.
    HazardRec->Reset();
    return;
  }
  if (SU->isCall) {
    // Calls are scheduled with their preceding instructions. For bottom-up
    // scheduling, clear the pipeline state before emitting.
    HazardRec->Reset();
  }

  HazardRec->EmitInstruction(SU);
}
static void resetVRegCycle(SUnit *SU);
/// ScheduleNodeBottomUp - Add the node to the schedule. Decrement the pending
/// count of its predecessors. If a predecessor pending count is zero, add it to
/// the Available queue.
void ScheduleDAGRRList::ScheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "\n*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

#ifndef NDEBUG
  if (CurCycle < SU->getHeight())
    DEBUG(dbgs() << "   Height [" << SU->getHeight()
          << "] pipeline stall!\n");
#endif

  // FIXME: Do not modify node height. It may interfere with
  // backtracking. Instead add a "ready cycle" to SUnit. Before scheduling the
  // node, its ready cycle can aid heuristics, and after scheduling it can
  // indicate the scheduled cycle.
  SU->setHeightToAtLeast(CurCycle);

  // Reserve resources for the scheduled instruction.
  EmitNode(SU);

  Sequence.push_back(SU);

  AvailableQueue->scheduledNode(SU);

  // If HazardRec is disabled, and each inst counts as one cycle, then
  // advance CurCycle before ReleasePredecessors to avoid useless pushes to
  // PendingQueue for schedulers that implement HasReadyFilter.
  if (!HazardRec->isEnabled() && AvgIPC < 2)
    AdvanceToCycle(CurCycle + 1);

  // Update liveness of predecessors before successors to avoid treating a
  // two-address node as a live range def.
  ReleasePredecessors(SU);

  // Release all the implicit physical register defs that are live.
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    // LiveRegDefs[I->getReg()] != SU when SU is a two-address node.
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] == SU) {
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = nullptr;
      LiveRegGens[I->getReg()] = nullptr;
      releaseInterferences(I->getReg());
    }
  }
  // Release the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  if (LiveRegDefs[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = nullptr;
        LiveRegGens[CallResource] = nullptr;
        releaseInterferences(CallResource);
      }
    }

  resetVRegCycle(SU);

  SU->isScheduled = true;

  // Conditions under which the scheduler should eagerly advance the cycle:
  // (1) No available instructions
  // (2) All pipelines full, so available instructions must have hazards.
  //
  // If HazardRec is disabled, the cycle was pre-advanced before calling
  // ReleasePredecessors. In that case, IssueCount should remain 0.
  //
  // Check AvailableQueue after ReleasePredecessors in case of zero latency.
  if (HazardRec->isEnabled() || AvgIPC > 1) {
    if (SU->getNode() && SU->getNode()->isMachineOpcode())
      ++IssueCount;
    if ((HazardRec->isEnabled() && HazardRec->atIssueLimit())
        || (!HazardRec->isEnabled() && IssueCount == AvgIPC))
      AdvanceToCycle(CurCycle + 1);
  }
}
/// CapturePred - This does the opposite of ReleasePred. Since SU is being
/// unscheduled, increase the succ left count of its predecessors. Remove
/// them from AvailableQueue if necessary.
void ScheduleDAGRRList::CapturePred(SDep *PredEdge) {
  SUnit *PredSU = PredEdge->getSUnit();
  if (PredSU->isAvailable) {
    PredSU->isAvailable = false;
    if (!PredSU->isPending)
      AvailableQueue->remove(PredSU);
  }

  assert(PredSU->NumSuccsLeft < UINT_MAX && "NumSuccsLeft will overflow!");
  ++PredSU->NumSuccsLeft;
}
/// UnscheduleNodeBottomUp - Remove the node from the schedule, update its and
/// its predecessor states to reflect the change.
void ScheduleDAGRRList::UnscheduleNodeBottomUp(SUnit *SU) {
  DEBUG(dbgs() << "*** Unscheduling [" << SU->getHeight() << "]: ");
  DEBUG(SU->dump(this));

  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    CapturePred(&*I);
    if (I->isAssignedRegDep() && SU == LiveRegGens[I->getReg()]){
      assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
      assert(LiveRegDefs[I->getReg()] == I->getSUnit() &&
             "Physical register dependency violated?");
      --NumLiveRegs;
      LiveRegDefs[I->getReg()] = nullptr;
      LiveRegGens[I->getReg()] = nullptr;
      releaseInterferences(I->getReg());
    }
  }

  // Reclaim the special call resource dependence, if this is the beginning
  // of a call.
  unsigned CallResource = TRI->getNumRegs();
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isMachineOpcode() &&
        SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameSetupOpcode()) {
      ++NumLiveRegs;
      LiveRegDefs[CallResource] = SU;
      LiveRegGens[CallResource] = CallSeqEndForStart[SU];
    }
  }

  // Release the special call resource dependence, if this is the end
  // of a call.
  if (LiveRegGens[CallResource] == SU)
    for (const SDNode *SUNode = SU->getNode(); SUNode;
         SUNode = SUNode->getGluedNode()) {
      if (SUNode->isMachineOpcode() &&
          SUNode->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
        assert(NumLiveRegs > 0 && "NumLiveRegs is already zero!");
        --NumLiveRegs;
        LiveRegDefs[CallResource] = nullptr;
        LiveRegGens[CallResource] = nullptr;
        releaseInterferences(CallResource);
      }
    }

  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isAssignedRegDep()) {
      if (!LiveRegDefs[I->getReg()])
        ++NumLiveRegs;
      // This becomes the nearest def. Note that an earlier def may still be
      // pending if this is a two-address node.
      LiveRegDefs[I->getReg()] = SU;
      if (LiveRegGens[I->getReg()] == nullptr ||
          I->getSUnit()->getHeight() < LiveRegGens[I->getReg()]->getHeight())
        LiveRegGens[I->getReg()] = I->getSUnit();
    }
  }
  if (SU->getHeight() < MinAvailableCycle)
    MinAvailableCycle = SU->getHeight();

  SU->setHeightDirty();
  SU->isScheduled = false;
  SU->isAvailable = true;
  if (!DisableSchedCycles && AvailableQueue->hasReadyFilter()) {
    // Don't make available until backtracking is complete.
    SU->isPending = true;
    PendingQueue.push_back(SU);
  }
  else {
    AvailableQueue->push(SU);
  }
  AvailableQueue->unscheduledNode(SU);
}
/// After backtracking, the hazard checker needs to be restored to a state
/// corresponding to the current cycle.
void ScheduleDAGRRList::RestoreHazardCheckerBottomUp() {
  HazardRec->Reset();

  unsigned LookAhead = std::min((unsigned)Sequence.size(),
                                HazardRec->getMaxLookAhead());
  if (LookAhead == 0)
    return;

  std::vector<SUnit*>::const_iterator I = (Sequence.end() - LookAhead);
  unsigned HazardCycle = (*I)->getHeight();
  for (std::vector<SUnit*>::const_iterator E = Sequence.end(); I != E; ++I) {
    SUnit *SU = *I;
    for (; SU->getHeight() > HazardCycle; ++HazardCycle) {
      HazardRec->RecedeCycle();
    }
    EmitNode(SU);
  }
}
/// BacktrackBottomUp - Backtrack scheduling to an earlier cycle, determined
/// by the position of BtSU, in order to schedule a specific node.
void ScheduleDAGRRList::BacktrackBottomUp(SUnit *SU, SUnit *BtSU) {
  SUnit *OldSU = Sequence.back();
  while (true) {
    Sequence.pop_back();
    // FIXME: use ready cycle instead of height
    CurCycle = OldSU->getHeight();
    UnscheduleNodeBottomUp(OldSU);
    AvailableQueue->setCurCycle(CurCycle);
    if (OldSU == BtSU)
      break;
    OldSU = Sequence.back();
  }

  assert(!SU->isSucc(OldSU) && "Something is wrong!");

  RestoreHazardCheckerBottomUp();

  ReleasePending();

  ++NumBacktracks;
}
static bool isOperandOf(const SUnit *SU, SDNode *N) {
  for (const SDNode *SUNode = SU->getNode(); SUNode;
       SUNode = SUNode->getGluedNode()) {
    if (SUNode->isOperandOf(N))
      return true;
  }
  return false;
}
/// CopyAndMoveSuccessors - Clone the specified node and move its scheduled
/// successors to the newly created node.
SUnit *ScheduleDAGRRList::CopyAndMoveSuccessors(SUnit *SU) {
  SDNode *N = SU->getNode();
  if (!N)
    return nullptr;

  if (SU->getNode()->getGluedNode())
    return nullptr;

  SUnit *NewSU;
  bool TryUnfold = false;
  for (unsigned i = 0, e = N->getNumValues(); i != e; ++i) {
    EVT VT = N->getValueType(i);
    if (VT == MVT::Glue)
      return nullptr;
    else if (VT == MVT::Other)
      TryUnfold = true;
  }
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i) {
    const SDValue &Op = N->getOperand(i);
    EVT VT = Op.getNode()->getValueType(Op.getResNo());
    if (VT == MVT::Glue)
      return nullptr;
  }

  if (TryUnfold) {
    SmallVector<SDNode*, 2> NewNodes;
    if (!TII->unfoldMemoryOperand(*DAG, N, NewNodes))
      return nullptr;

    // Unfolding an x86 DEC64m operation results in store, dec, load, which
    // can't be handled here, so quit.
    if (NewNodes.size() == 3)
      return nullptr;

    DEBUG(dbgs() << "Unfolding SU #" << SU->NodeNum << "\n");
    assert(NewNodes.size() == 2 && "Expected a load folding node!");

    N = NewNodes[1];
    SDNode *LoadNode = NewNodes[0];
    unsigned NumVals = N->getNumValues();
    unsigned OldNumVals = SU->getNode()->getNumValues();
    for (unsigned i = 0; i != NumVals; ++i)
      DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), i), SDValue(N, i));
    DAG->ReplaceAllUsesOfValueWith(SDValue(SU->getNode(), OldNumVals-1),
                                   SDValue(LoadNode, 1));

    // LoadNode may already exist. This can happen when there is another
    // load from the same location and producing the same type of value
    // but it has different alignment or volatileness.
    bool isNewLoad = true;
    SUnit *LoadSU;
    if (LoadNode->getNodeId() != -1) {
      LoadSU = &SUnits[LoadNode->getNodeId()];
      isNewLoad = false;
    } else {
      LoadSU = CreateNewSUnit(LoadNode);
      LoadNode->setNodeId(LoadSU->NodeNum);

      InitNumRegDefsLeft(LoadSU);
      computeLatency(LoadSU);
    }

    SUnit *NewSU = CreateNewSUnit(N);
    assert(N->getNodeId() == -1 && "Node already inserted!");
    N->setNodeId(NewSU->NodeNum);

    const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
    for (unsigned i = 0; i != MCID.getNumOperands(); ++i) {
      if (MCID.getOperandConstraint(i, MCOI::TIED_TO) != -1) {
        NewSU->isTwoAddress = true;
        break;
      }
    }
    if (MCID.isCommutable())
      NewSU->isCommutable = true;

    InitNumRegDefsLeft(NewSU);
    computeLatency(NewSU);

    // Record all the edges to and from the old SU, by category.
    SmallVector<SDep, 4> ChainPreds;
    SmallVector<SDep, 4> ChainSuccs;
    SmallVector<SDep, 4> LoadPreds;
    SmallVector<SDep, 4> NodePreds;
    SmallVector<SDep, 4> NodeSuccs;
    for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainPreds.push_back(*I);
      else if (isOperandOf(I->getSUnit(), LoadNode))
        LoadPreds.push_back(*I);
      else
        NodePreds.push_back(*I);
    }
    for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
         I != E; ++I) {
      if (I->isCtrl())
        ChainSuccs.push_back(*I);
      else
        NodeSuccs.push_back(*I);
    }

    // Now assign edges to the newly-created nodes.
    for (unsigned i = 0, e = ChainPreds.size(); i != e; ++i) {
      const SDep &Pred = ChainPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = LoadPreds.size(); i != e; ++i) {
      const SDep &Pred = LoadPreds[i];
      RemovePred(SU, Pred);
      if (isNewLoad)
        AddPred(LoadSU, Pred);
    }
    for (unsigned i = 0, e = NodePreds.size(); i != e; ++i) {
      const SDep &Pred = NodePreds[i];
      RemovePred(SU, Pred);
      AddPred(NewSU, Pred);
    }
    for (unsigned i = 0, e = NodeSuccs.size(); i != e; ++i) {
      SDep D = NodeSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      D.setSUnit(NewSU);
      AddPred(SuccDep, D);
      // Balance register pressure.
      if (AvailableQueue->tracksRegPressure() && SuccDep->isScheduled
          && !D.isCtrl() && NewSU->NumRegDefsLeft > 0)
        --NewSU->NumRegDefsLeft;
    }
    for (unsigned i = 0, e = ChainSuccs.size(); i != e; ++i) {
      SDep D = ChainSuccs[i];
      SUnit *SuccDep = D.getSUnit();
      D.setSUnit(SU);
      RemovePred(SuccDep, D);
      if (isNewLoad) {
        D.setSUnit(LoadSU);
        AddPred(SuccDep, D);
      }
    }

    // Add a data dependency to reflect that NewSU reads the value defined
    // by LoadSU.
    SDep D(LoadSU, SDep::Data, 0);
    D.setLatency(LoadSU->Latency);
    AddPred(NewSU, D);

    if (isNewLoad)
      AvailableQueue->addNode(LoadSU);
    AvailableQueue->addNode(NewSU);

    ++NumUnfolds;

    if (NewSU->NumSuccsLeft == 0) {
      NewSU->isAvailable = true;
      return NewSU;
    }
    SU = NewSU;
  }

  DEBUG(dbgs() << "    Duplicating SU #" << SU->NodeNum << "\n");
  NewSU = CreateClone(SU);

  // New SUnit has the exact same predecessors.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I)
    if (!I->isArtificial())
      AddPred(NewSU, *I);

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(NewSU);
      AddPred(SuccSU, D);
      D.setSUnit(SU);
      DelDeps.push_back(std::make_pair(SuccSU, D));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(NewSU);

  ++NumDups;

  return NewSU;
}
/// InsertCopiesAndMoveSuccs - Insert register copies and move all
/// scheduled successors of the given SUnit to the last copy.
void ScheduleDAGRRList::InsertCopiesAndMoveSuccs(SUnit *SU, unsigned Reg,
                                               const TargetRegisterClass *DestRC,
                                               const TargetRegisterClass *SrcRC,
                                               SmallVectorImpl<SUnit*> &Copies) {
  SUnit *CopyFromSU = CreateNewSUnit(nullptr);
  CopyFromSU->CopySrcRC = SrcRC;
  CopyFromSU->CopyDstRC = DestRC;

  SUnit *CopyToSU = CreateNewSUnit(nullptr);
  CopyToSU->CopySrcRC = DestRC;
  CopyToSU->CopyDstRC = SrcRC;

  // Only copy scheduled successors. Cut them from old node's successor
  // list and move them over.
  SmallVector<std::pair<SUnit *, SDep>, 4> DelDeps;
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (I->isArtificial())
      continue;
    SUnit *SuccSU = I->getSUnit();
    if (SuccSU->isScheduled) {
      SDep D = *I;
      D.setSUnit(CopyToSU);
      AddPred(SuccSU, D);
      DelDeps.push_back(std::make_pair(SuccSU, *I));
    }
    else {
      // Avoid scheduling the def-side copy before other successors. Otherwise
      // we could introduce another physreg interference on the copy and
      // continue inserting copies indefinitely.
      AddPred(SuccSU, SDep(CopyFromSU, SDep::Artificial));
    }
  }
  for (unsigned i = 0, e = DelDeps.size(); i != e; ++i)
    RemovePred(DelDeps[i].first, DelDeps[i].second);

  SDep FromDep(SU, SDep::Data, Reg);
  FromDep.setLatency(SU->Latency);
  AddPred(CopyFromSU, FromDep);
  SDep ToDep(CopyFromSU, SDep::Data, 0);
  ToDep.setLatency(CopyFromSU->Latency);
  AddPred(CopyToSU, ToDep);

  AvailableQueue->updateNode(SU);
  AvailableQueue->addNode(CopyFromSU);
  AvailableQueue->addNode(CopyToSU);
  Copies.push_back(CopyFromSU);
  Copies.push_back(CopyToSU);

  ++NumPRCopies;
}
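
// To illustrate (informal sketch): after InsertCopiesAndMoveSuccs, the value
// that used to flow directly from SU's physical register def to its scheduled
// successors instead flows through a two-copy chain,
//
//   SU --data(Reg)--> CopyFromSU (SrcRC -> DestRC)
//                     CopyFromSU --data--> CopyToSU (DestRC -> SrcRC)
//                     CopyToSU   --data--> former scheduled successors of SU
//
// so the value is parked in the cross-copy class DestRC while the physical
// register is free to be clobbered and redefined.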
/// getPhysicalRegisterVT - Returns the ValueType of the physical register
/// definition of the specified node.
/// FIXME: Move to SelectionDAG?
static EVT getPhysicalRegisterVT(SDNode *N, unsigned Reg,
                                 const TargetInstrInfo *TII) {
  const MCInstrDesc &MCID = TII->get(N->getMachineOpcode());
  assert(MCID.ImplicitDefs && "Physical reg def must be in implicit def list!");
  unsigned NumRes = MCID.getNumDefs();
  for (const uint16_t *ImpDef = MCID.getImplicitDefs(); *ImpDef; ++ImpDef) {
    if (Reg == *ImpDef)
      break;
    ++NumRes;
  }
  return N->getValueType(NumRes);
}
/// CheckForLiveRegDef - Update the live register vector and add interfering
/// registers to LRegs if the specified register def of the specified SUnit
/// clobbers any "live" registers.
static void CheckForLiveRegDef(SUnit *SU, unsigned Reg,
                               std::vector<SUnit*> &LiveRegDefs,
                               SmallSet<unsigned, 4> &RegAdded,
                               SmallVectorImpl<unsigned> &LRegs,
                               const TargetRegisterInfo *TRI) {
  for (MCRegAliasIterator AliasI(Reg, TRI, true); AliasI.isValid(); ++AliasI) {

    // Check if Reg is live.
    if (!LiveRegDefs[*AliasI]) continue;

    // Allow multiple uses of the same def.
    if (LiveRegDefs[*AliasI] == SU) continue;

    // Add Reg to the set of interfering live regs.
    if (RegAdded.insert(*AliasI)) {
      LRegs.push_back(*AliasI);
    }
  }
}
/// CheckForLiveRegDefMasked - Check for any live physregs that are clobbered
/// by RegMask, and add them to LRegs.
static void CheckForLiveRegDefMasked(SUnit *SU, const uint32_t *RegMask,
                                     std::vector<SUnit*> &LiveRegDefs,
                                     SmallSet<unsigned, 4> &RegAdded,
                                     SmallVectorImpl<unsigned> &LRegs) {
  // Look at all live registers. Skip Reg0 and the special CallResource.
  for (unsigned i = 1, e = LiveRegDefs.size()-1; i != e; ++i) {
    if (!LiveRegDefs[i]) continue;
    if (LiveRegDefs[i] == SU) continue;
    if (!MachineOperand::clobbersPhysReg(RegMask, i)) continue;
    if (RegAdded.insert(i))
      LRegs.push_back(i);
  }
}
/// getNodeRegMask - Returns the register mask attached to an SDNode, if any.
static const uint32_t *getNodeRegMask(const SDNode *N) {
  for (unsigned i = 0, e = N->getNumOperands(); i != e; ++i)
    if (const RegisterMaskSDNode *Op =
        dyn_cast<RegisterMaskSDNode>(N->getOperand(i).getNode()))
      return Op->getRegMask();
  return nullptr;
}
/// DelayForLiveRegsBottomUp - Returns true if it is necessary to delay
/// scheduling of the given node to satisfy live physical register dependencies.
/// If the specific node is the last one that's available to schedule, do
/// whatever is necessary (i.e. backtracking or cloning) to make it possible.
bool ScheduleDAGRRList::
DelayForLiveRegsBottomUp(SUnit *SU, SmallVectorImpl<unsigned> &LRegs) {
  if (NumLiveRegs == 0)
    return false;

  SmallSet<unsigned, 4> RegAdded;
  // If this node would clobber any "live" register, then it's not ready.
  //
  // If SU is the currently live definition of the same register that it uses,
  // then we are free to schedule it.
  for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
       I != E; ++I) {
    if (I->isAssignedRegDep() && LiveRegDefs[I->getReg()] != SU)
      CheckForLiveRegDef(I->getSUnit(), I->getReg(), LiveRegDefs,
                         RegAdded, LRegs, TRI);
  }

  for (SDNode *Node = SU->getNode(); Node; Node = Node->getGluedNode()) {
    if (Node->getOpcode() == ISD::INLINEASM) {
      // Inline asm can clobber physical defs.
      unsigned NumOps = Node->getNumOperands();
      if (Node->getOperand(NumOps-1).getValueType() == MVT::Glue)
        --NumOps;  // Ignore the glue operand.

      for (unsigned i = InlineAsm::Op_FirstOperand; i != NumOps;) {
        unsigned Flags =
          cast<ConstantSDNode>(Node->getOperand(i))->getZExtValue();
        unsigned NumVals = InlineAsm::getNumOperandRegisters(Flags);

        ++i; // Skip the ID value.
        if (InlineAsm::isRegDefKind(Flags) ||
            InlineAsm::isRegDefEarlyClobberKind(Flags) ||
            InlineAsm::isClobberKind(Flags)) {
          // Check for def of register or earlyclobber register.
          for (; NumVals; --NumVals, ++i) {
            unsigned Reg = cast<RegisterSDNode>(Node->getOperand(i))->getReg();
            if (TargetRegisterInfo::isPhysicalRegister(Reg))
              CheckForLiveRegDef(SU, Reg, LiveRegDefs, RegAdded, LRegs, TRI);
          }
        } else
          i += NumVals;
      }
      continue;
    }

    if (!Node->isMachineOpcode())
      continue;
    // If we're in the middle of scheduling a call, don't begin scheduling
    // another call. Also, don't allow any physical registers to be live across
    // the call.
    if (Node->getMachineOpcode() == (unsigned)TII->getCallFrameDestroyOpcode()) {
      // Check the special calling-sequence resource.
      unsigned CallResource = TRI->getNumRegs();
      if (LiveRegDefs[CallResource]) {
        SDNode *Gen = LiveRegGens[CallResource]->getNode();
        while (SDNode *Glued = Gen->getGluedNode())
          Gen = Glued;
        if (!IsChainDependent(Gen, Node, 0, TII) && RegAdded.insert(CallResource))
          LRegs.push_back(CallResource);
      }
    }
    if (const uint32_t *RegMask = getNodeRegMask(Node))
      CheckForLiveRegDefMasked(SU, RegMask, LiveRegDefs, RegAdded, LRegs);

    const MCInstrDesc &MCID = TII->get(Node->getMachineOpcode());
    if (!MCID.ImplicitDefs)
      continue;
    for (const uint16_t *Reg = MCID.getImplicitDefs(); *Reg; ++Reg)
      CheckForLiveRegDef(SU, *Reg, LiveRegDefs, RegAdded, LRegs, TRI);
  }

  return !LRegs.empty();
}
void ScheduleDAGRRList::releaseInterferences(unsigned Reg) {
  // Add the nodes that aren't ready back onto the available list.
  for (unsigned i = Interferences.size(); i > 0; --i) {
    SUnit *SU = Interferences[i-1];
    LRegsMapT::iterator LRegsPos = LRegsMap.find(SU);
    if (Reg) {
      SmallVectorImpl<unsigned> &LRegs = LRegsPos->second;
      if (std::find(LRegs.begin(), LRegs.end(), Reg) == LRegs.end())
        continue;
    }
    SU->isPending = false;
    // The interfering node may no longer be available due to backtracking.
    // Furthermore, it may have been made available again, in which case it is
    // now already in the AvailableQueue.
    if (SU->isAvailable && !SU->NodeQueueId) {
      DEBUG(dbgs() << "    Repushing SU #" << SU->NodeNum << '\n');
      AvailableQueue->push(SU);
    }
    if (i < Interferences.size())
      Interferences[i-1] = Interferences.back();
    Interferences.pop_back();
    LRegsMap.erase(LRegsPos);
  }
}
/// Return a node that can be scheduled in this cycle. Requirements:
/// (1) Ready: latency has been satisfied
/// (2) No Hazards: resources are available
/// (3) No Interferences: may unschedule to break register interferences.
SUnit *ScheduleDAGRRList::PickNodeToScheduleBottomUp() {
  SUnit *CurSU = AvailableQueue->empty() ? nullptr : AvailableQueue->pop();
  while (CurSU) {
    SmallVector<unsigned, 4> LRegs;
    if (!DelayForLiveRegsBottomUp(CurSU, LRegs))
      break;
    DEBUG(dbgs() << "    Interfering reg " <<
          (LRegs[0] == TRI->getNumRegs() ? "CallResource"
           : TRI->getName(LRegs[0]))
           << " SU #" << CurSU->NodeNum << '\n');
    std::pair<LRegsMapT::iterator, bool> LRegsPair =
      LRegsMap.insert(std::make_pair(CurSU, LRegs));
    if (LRegsPair.second) {
      CurSU->isPending = true;  // This SU is not in AvailableQueue right now.
      Interferences.push_back(CurSU);
    }
    else {
      assert(CurSU->isPending && "Interferences are pending");
      // Update the interference with current live regs.
      LRegsPair.first->second = LRegs;
    }
    CurSU = AvailableQueue->pop();
  }
  if (CurSU)
    return CurSU;

  // All candidates are delayed due to live physical reg dependencies.
  // Try backtracking, code duplication, or inserting cross class copies
  // to break it.
  for (unsigned i = 0, e = Interferences.size(); i != e; ++i) {
    SUnit *TrySU = Interferences[i];
    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];

    // Try unscheduling up to the point where it's safe to schedule
    // this node.
    SUnit *BtSU = nullptr;
    unsigned LiveCycle = UINT_MAX;
    for (unsigned j = 0, ee = LRegs.size(); j != ee; ++j) {
      unsigned Reg = LRegs[j];
      if (LiveRegGens[Reg]->getHeight() < LiveCycle) {
        BtSU = LiveRegGens[Reg];
        LiveCycle = BtSU->getHeight();
      }
    }
    if (!WillCreateCycle(TrySU, BtSU)) {
      // BacktrackBottomUp mutates Interferences!
      BacktrackBottomUp(TrySU, BtSU);

      // Force the current node to be scheduled before the node that
      // requires the physical reg dep.
      if (BtSU->isAvailable) {
        BtSU->isAvailable = false;
        if (!BtSU->isPending)
          AvailableQueue->remove(BtSU);
      }
      DEBUG(dbgs() << "ARTIFICIAL edge from SU(" << BtSU->NodeNum << ") to SU("
            << TrySU->NodeNum << ")\n");
      AddPred(TrySU, SDep(BtSU, SDep::Artificial));

      // If one or more successors has been unscheduled, then the current
      // node is no longer available.
      if (!TrySU->isAvailable)
        CurSU = AvailableQueue->pop();
      else {
        AvailableQueue->remove(TrySU);
        CurSU = TrySU;
      }
      // Interferences has been mutated. We must break.
      break;
    }
  }

  if (!CurSU) {
    // Can't backtrack. If it's too expensive to copy the value, then try to
    // duplicate the nodes that produce these "too expensive to copy"
    // values to break the dependency. In case even that doesn't work,
    // insert cross class copies.
    // If it's not too expensive, i.e. cost != -1, issue copies.
    SUnit *TrySU = Interferences[0];
    SmallVectorImpl<unsigned> &LRegs = LRegsMap[TrySU];
    assert(LRegs.size() == 1 && "Can't handle this yet!");
    unsigned Reg = LRegs[0];
    SUnit *LRDef = LiveRegDefs[Reg];
    EVT VT = getPhysicalRegisterVT(LRDef->getNode(), Reg, TII);
    const TargetRegisterClass *RC =
      TRI->getMinimalPhysRegClass(Reg, VT);
    const TargetRegisterClass *DestRC = TRI->getCrossCopyRegClass(RC);

    // If the cross copy register class is the same as RC, then it must be
    // possible to copy the value directly. Do not try to duplicate the def.
    // If the cross copy register class is not the same as RC, then it's
    // possible to copy the value but it requires cross register class copies
    // and it is expensive.
    // If the cross copy register class is null, then it's not possible to
    // copy the value at all.
    SUnit *NewDef = nullptr;
    if (DestRC != RC) {
      NewDef = CopyAndMoveSuccessors(LRDef);
      if (!DestRC && !NewDef)
        report_fatal_error("Can't handle live physical register dependency!");
    }
    if (!NewDef) {
      // Issue copies, these can be expensive cross register class copies.
      SmallVector<SUnit*, 2> Copies;
      InsertCopiesAndMoveSuccs(LRDef, Reg, DestRC, RC, Copies);
      DEBUG(dbgs() << "    Adding an edge from SU #" << TrySU->NodeNum
            << " to SU #" << Copies.front()->NodeNum << "\n");
      AddPred(TrySU, SDep(Copies.front(), SDep::Artificial));
      NewDef = Copies.back();
    }

    DEBUG(dbgs() << "    Adding an edge from SU #" << NewDef->NodeNum
          << " to SU #" << TrySU->NodeNum << "\n");
    LiveRegDefs[Reg] = NewDef;
    AddPred(NewDef, SDep(TrySU, SDep::Artificial));
    TrySU->isAvailable = false;
    CurSU = NewDef;
  }
  assert(CurSU && "Unable to resolve live physical register dependencies!");
  return CurSU;
}
/// ListScheduleBottomUp - The main loop of list scheduling for bottom-up
/// schedulers.
void ScheduleDAGRRList::ListScheduleBottomUp() {
  // Release any predecessors of the special Exit node.
  ReleasePredecessors(&ExitSU);

  // Add root to Available queue.
  if (!SUnits.empty()) {
    SUnit *RootSU = &SUnits[DAG->getRoot().getNode()->getNodeId()];
    assert(RootSU->Succs.empty() && "Graph root shouldn't have successors!");
    RootSU->isAvailable = true;
    AvailableQueue->push(RootSU);
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue->empty() || !Interferences.empty()) {
    DEBUG(dbgs() << "\nExamining Available:\n";
          AvailableQueue->dump(this));

    // Pick the best node to schedule taking all constraints into
    // account.
    SUnit *SU = PickNodeToScheduleBottomUp();

    AdvancePastStalls(SU);

    ScheduleNodeBottomUp(SU);

    while (AvailableQueue->empty() && !PendingQueue.empty()) {
      // Advance the cycle to free resources. Skip ahead to the next ready SU.
      assert(MinAvailableCycle < UINT_MAX && "MinAvailableCycle uninitialized");
      AdvanceToCycle(std::max(CurCycle + 1, MinAvailableCycle));
    }
  }

  // Reverse the order if it is bottom up.
  std::reverse(Sequence.begin(), Sequence.end());

#ifndef NDEBUG
  VerifyScheduledSequence(/*isBottomUp=*/true);
#endif
}
//===----------------------------------------------------------------------===//
//  RegReductionPriorityQueue Definition
//===----------------------------------------------------------------------===//
//
// This is a SchedulingPriorityQueue that schedules using Sethi Ullman numbers
// to reduce register pressure.
//
namespace {
class RegReductionPQBase;

struct queue_sort : public std::binary_function<SUnit*, SUnit*, bool> {
  bool isReady(SUnit* SU, unsigned CurCycle) const { return true; }
};

template<class SF>
struct reverse_sort : public queue_sort {
  SF &SortFunc;
  reverse_sort(SF &sf) : SortFunc(sf) {}

  bool operator()(SUnit* left, SUnit* right) const {
    // reverse left/right rather than simply !SortFunc(left, right)
    // to expose different paths in the comparison logic.
    return SortFunc(right, left);
  }
};

// bu_ls_rr_sort - Priority function for bottom up register pressure
// reduction scheduler.
struct bu_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  bu_ls_rr_sort(RegReductionPQBase *spq) : SPQ(spq) {}

  bool operator()(SUnit* left, SUnit* right) const;
};
// src_ls_rr_sort - Priority function for source order scheduler.
struct src_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  src_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}

  bool operator()(SUnit* left, SUnit* right) const;
};

// hybrid_ls_rr_sort - Priority function for hybrid scheduler.
struct hybrid_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  hybrid_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};

// ilp_ls_rr_sort - Priority function for ILP (instruction level parallelism)
// scheduler.
struct ilp_ls_rr_sort : public queue_sort {
  enum {
    IsBottomUp = true,
    HasReadyFilter = false
  };

  RegReductionPQBase *SPQ;
  ilp_ls_rr_sort(RegReductionPQBase *spq)
    : SPQ(spq) {}

  bool isReady(SUnit *SU, unsigned CurCycle) const;

  bool operator()(SUnit* left, SUnit* right) const;
};
class RegReductionPQBase : public SchedulingPriorityQueue {
protected:
  std::vector<SUnit*> Queue;
  unsigned CurQueueId;
  bool TracksRegPressure;
  bool SrcOrder;

  // SUnits - The SUnits for the current graph.
  std::vector<SUnit> *SUnits;

  MachineFunction &MF;
  const TargetInstrInfo *TII;
  const TargetRegisterInfo *TRI;
  const TargetLowering *TLI;
  ScheduleDAGRRList *scheduleDAG;

  // SethiUllmanNumbers - The SethiUllman number for each node.
  std::vector<unsigned> SethiUllmanNumbers;

  /// RegPressure - Tracking current reg pressure per register class.
  ///
  std::vector<unsigned> RegPressure;

  /// RegLimit - Tracking the number of allocatable registers per register
  /// class.
  std::vector<unsigned> RegLimit;

public:
  RegReductionPQBase(MachineFunction &mf,
                     bool hasReadyFilter,
                     bool tracksrp,
                     bool srcorder,
                     const TargetInstrInfo *tii,
                     const TargetRegisterInfo *tri,
                     const TargetLowering *tli)
    : SchedulingPriorityQueue(hasReadyFilter),
      CurQueueId(0), TracksRegPressure(tracksrp), SrcOrder(srcorder),
      MF(mf), TII(tii), TRI(tri), TLI(tli), scheduleDAG(nullptr) {
    if (TracksRegPressure) {
      unsigned NumRC = TRI->getNumRegClasses();
      RegLimit.resize(NumRC);
      RegPressure.resize(NumRC);
      std::fill(RegLimit.begin(), RegLimit.end(), 0);
      std::fill(RegPressure.begin(), RegPressure.end(), 0);
      for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
             E = TRI->regclass_end(); I != E; ++I)
        RegLimit[(*I)->getID()] = tri->getRegPressureLimit(*I, MF);
    }
  }
  void setScheduleDAG(ScheduleDAGRRList *scheduleDag) {
    scheduleDAG = scheduleDag;
  }

  ScheduleHazardRecognizer* getHazardRec() {
    return scheduleDAG->getHazardRec();
  }

  void initNodes(std::vector<SUnit> &sunits) override;

  void addNode(const SUnit *SU) override;

  void updateNode(const SUnit *SU) override;

  void releaseState() override {
    SUnits = nullptr;
    SethiUllmanNumbers.clear();
    std::fill(RegPressure.begin(), RegPressure.end(), 0);
  }

  unsigned getNodePriority(const SUnit *SU) const;

  unsigned getNodeOrdering(const SUnit *SU) const {
    if (!SU->getNode()) return 0;

    return SU->getNode()->getIROrder();
  }

  bool empty() const override { return Queue.empty(); }

  void push(SUnit *U) override {
    assert(!U->NodeQueueId && "Node in the queue already");
    U->NodeQueueId = ++CurQueueId;
    Queue.push_back(U);
  }

  void remove(SUnit *SU) override {
    assert(!Queue.empty() && "Queue is empty!");
    assert(SU->NodeQueueId != 0 && "Not in queue!");
    std::vector<SUnit *>::iterator I = std::find(Queue.begin(), Queue.end(),
                                                 SU);
    if (I != std::prev(Queue.end()))
      std::swap(*I, Queue.back());
    Queue.pop_back();
    SU->NodeQueueId = 0;
  }

  bool tracksRegPressure() const override { return TracksRegPressure; }

  void dumpRegPressure() const;

  bool HighRegPressure(const SUnit *SU) const;

  bool MayReduceRegPressure(SUnit *SU) const;

  int RegPressureDiff(SUnit *SU, unsigned &LiveUses) const;

  void scheduledNode(SUnit *SU) override;

  void unscheduledNode(SUnit *SU) override;

protected:
  bool canClobber(const SUnit *SU, const SUnit *Op);
  void AddPseudoTwoAddrDeps();
  void PrescheduleNodesWithMultipleUses();
  void CalculateSethiUllmanNumbers();
};
template<class SF>
static SUnit *popFromQueueImpl(std::vector<SUnit*> &Q, SF &Picker) {
  std::vector<SUnit *>::iterator Best = Q.begin();
  for (std::vector<SUnit *>::iterator I = std::next(Q.begin()),
       E = Q.end(); I != E; ++I)
    if (Picker(*Best, *I))
      Best = I;
  SUnit *V = *Best;
  if (Best != std::prev(Q.end()))
    std::swap(*Best, Q.back());
  Q.pop_back();
  return V;
}

template<class SF>
SUnit *popFromQueue(std::vector<SUnit*> &Q, SF &Picker, ScheduleDAG *DAG) {
#ifndef NDEBUG
  if (DAG->StressSched) {
    reverse_sort<SF> RPicker(Picker);
    return popFromQueueImpl(Q, RPicker);
  }
#endif
  (void)DAG;
  return popFromQueueImpl(Q, Picker);
}
template<class SF>
class RegReductionPriorityQueue : public RegReductionPQBase {
  SF Picker;

public:
  RegReductionPriorityQueue(MachineFunction &mf,
                            bool tracksrp,
                            bool srcorder,
                            const TargetInstrInfo *tii,
                            const TargetRegisterInfo *tri,
                            const TargetLowering *tli)
    : RegReductionPQBase(mf, SF::HasReadyFilter, tracksrp, srcorder,
                         tii, tri, tli),
      Picker(this) {}

  bool isBottomUp() const override { return SF::IsBottomUp; }

  bool isReady(SUnit *U) const override {
    return Picker.HasReadyFilter && Picker.isReady(U, getCurCycle());
  }

  SUnit *pop() override {
    if (Queue.empty()) return nullptr;

    SUnit *V = popFromQueue(Queue, Picker, scheduleDAG);
    V->NodeQueueId = 0;
    return V;
  }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
  void dump(ScheduleDAG *DAG) const override {
    // Emulate pop() without clobbering NodeQueueIds.
    std::vector<SUnit*> DumpQueue = Queue;
    SF DumpPicker = Picker;
    while (!DumpQueue.empty()) {
      SUnit *SU = popFromQueue(DumpQueue, DumpPicker, scheduleDAG);
      dbgs() << "Height " << SU->getHeight() << ": ";
      SU->dump(DAG);
    }
  }
#endif
};

typedef RegReductionPriorityQueue<bu_ls_rr_sort>
BURegReductionPriorityQueue;

typedef RegReductionPriorityQueue<src_ls_rr_sort>
SrcRegReductionPriorityQueue;

typedef RegReductionPriorityQueue<hybrid_ls_rr_sort>
HybridBURRPriorityQueue;

typedef RegReductionPriorityQueue<ilp_ls_rr_sort>
ILPBURRPriorityQueue;
} // end anonymous namespace
1816 //===----------------------------------------------------------------------===//
1817 // Static Node Priority for Register Pressure Reduction
1818 //===----------------------------------------------------------------------===//
1820 // Check for special nodes that bypass scheduling heuristics.
1821 // Currently this pushes TokenFactor nodes down, but may be used for other
1822 // pseudo-ops as well.
1824 // Return -1 to schedule right above left, 1 for left above right.
1825 // Return 0 if no bias exists.
1826 static int checkSpecialNodes(const SUnit *left, const SUnit *right) {
1827 bool LSchedLow = left->isScheduleLow;
1828 bool RSchedLow = right->isScheduleLow;
1829 if (LSchedLow != RSchedLow)
1830 return LSchedLow < RSchedLow ? 1 : -1;
1831 return 0;
1832 }
1834 /// CalcNodeSethiUllmanNumber - Compute Sethi-Ullman number.
1835 /// Smaller number is the higher priority.
1836 static unsigned
1837 CalcNodeSethiUllmanNumber(const SUnit *SU, std::vector<unsigned> &SUNumbers) {
1838 unsigned &SethiUllmanNumber = SUNumbers[SU->NodeNum];
1839 if (SethiUllmanNumber != 0)
1840 return SethiUllmanNumber;
1842 unsigned Extra = 0;
1843 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
1844 I != E; ++I) {
1845 if (I->isCtrl()) continue;  // ignore chain preds
1846 SUnit *PredSU = I->getSUnit();
1847 unsigned PredSethiUllman = CalcNodeSethiUllmanNumber(PredSU, SUNumbers);
1848 if (PredSethiUllman > SethiUllmanNumber) {
1849 SethiUllmanNumber = PredSethiUllman;
1850 Extra = 0;
1851 } else if (PredSethiUllman == SethiUllmanNumber)
1852 ++Extra;
1853 }
1855 SethiUllmanNumber += Extra;
1857 if (SethiUllmanNumber == 0)
1858 SethiUllmanNumber = 1;
1860 return SethiUllmanNumber;
1861 }
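// Worked example (illustrative): for (a + b) * (c + d) the leaves a, b, c, d
// have no data predecessors and get Sethi-Ullman number 1. Each add sees two
// predecessors tied at 1, so it gets 1 + Extra(1) = 2, and the multiply sees
// two predecessors tied at 2 and gets 3. Scheduling the higher-numbered
// subtree first minimizes how many values are simultaneously live, which is
// the classic Sethi-Ullman result.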
1863 /// CalculateSethiUllmanNumbers - Calculate Sethi-Ullman numbers of all
1864 /// scheduling units.
1865 void RegReductionPQBase::CalculateSethiUllmanNumbers() {
1866 SethiUllmanNumbers.assign(SUnits->size(), 0);
1868 for (unsigned i = 0, e = SUnits->size(); i != e; ++i)
1869 CalcNodeSethiUllmanNumber(&(*SUnits)[i], SethiUllmanNumbers);
1870 }
1872 void RegReductionPQBase::addNode(const SUnit *SU) {
1873 unsigned SUSize = SethiUllmanNumbers.size();
1874 if (SUnits->size() > SUSize)
1875 SethiUllmanNumbers.resize(SUSize*2, 0);
1876 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1877 }
1879 void RegReductionPQBase::updateNode(const SUnit *SU) {
1880 SethiUllmanNumbers[SU->NodeNum] = 0;
1881 CalcNodeSethiUllmanNumber(SU, SethiUllmanNumbers);
1882 }
1884 // Lower priority means schedule further down. For bottom-up scheduling, lower
1885 // priority SUs are scheduled before higher priority SUs.
1886 unsigned RegReductionPQBase::getNodePriority(const SUnit *SU) const {
1887 assert(SU->NodeNum < SethiUllmanNumbers.size());
1888 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
1889 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
1890 // CopyToReg should be close to its uses to facilitate coalescing and
1891 // avoid spilling.
1892 return 0;
1893 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
1894 Opc == TargetOpcode::SUBREG_TO_REG ||
1895 Opc == TargetOpcode::INSERT_SUBREG)
1896 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
1897 // close to their uses to facilitate coalescing.
1898 return 0;
1899 if (SU->NumSuccs == 0 && SU->NumPreds != 0)
1900 // If SU does not have a register use, i.e. it doesn't produce a value
1901 // that would be consumed (e.g. store), then it terminates a chain of
1902 // computation. Give it a large SethiUllman number so it will be
1903 // scheduled right before its predecessors, so that it doesn't lengthen
1904 // their live ranges.
1905 return 0xffff;
1906 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
1907 // If SU does not have a register def, schedule it close to its uses
1908 // because it does not lengthen any live ranges.
1909 return 0;
1910 #if 1
1911 return SethiUllmanNumbers[SU->NodeNum];
1912 #else
1913 unsigned Priority = SethiUllmanNumbers[SU->NodeNum];
1914 if (SU->isCallOp) {
1915 // FIXME: This assumes all of the defs are used as call operands.
1916 int NP = (int)Priority - SU->getNode()->getNumValues();
1917 return (NP > 0) ? NP : 0;
1918 }
1919 return Priority;
1920 #endif
1921 }
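// Illustrative consequences of the special cases above: a store (no data
// successors) reports 0xffff, so independent ready work keeps getting picked
// ahead of it and lands below it; the store is emitted just beneath the
// computation that feeds it, keeping its operands' live ranges short.
// Conversely, a node with no data predecessors reports 0 and is picked
// eagerly, sinking it next to its already-scheduled uses.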
1923 //===----------------------------------------------------------------------===//
1924 // Register Pressure Tracking
1925 //===----------------------------------------------------------------------===//
1927 void RegReductionPQBase::dumpRegPressure() const {
1928 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
1929 for (TargetRegisterInfo::regclass_iterator I = TRI->regclass_begin(),
1930 E = TRI->regclass_end(); I != E; ++I) {
1931 const TargetRegisterClass *RC = *I;
1932 unsigned Id = RC->getID();
1933 unsigned RP = RegPressure[Id];
1934 if (!RP) continue;
1935 DEBUG(dbgs() << RC->getName() << ": " << RP << " / " << RegLimit[Id]
1936 << '\n');
1937 }
1938 #endif
1939 }
1941 bool RegReductionPQBase::HighRegPressure(const SUnit *SU) const {
1942 if (!TLI)
1943 return false;
1945 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1946 I != E; ++I) {
1947 if (I->isCtrl())
1948 continue;
1949 SUnit *PredSU = I->getSUnit();
1950 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
1951 // to cover the number of registers defined (they are all live).
1952 if (PredSU->NumRegDefsLeft == 0) {
1953 continue;
1954 }
1955 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
1956 RegDefPos.IsValid(); RegDefPos.Advance()) {
1957 unsigned RCId, Cost;
1958 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
1960 if ((RegPressure[RCId] + Cost) >= RegLimit[RCId])
1961 return true;
1962 }
1963 }
1964 return false;
1965 }
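// Worked example (illustrative): with RegLimit[RCId] == 8 for a GPR-like
// class and RegPressure[RCId] == 7, an SU whose still-unscheduled operand
// def would add one more unit of cost reports high pressure (7 + 1 >= 8),
// which is what flips the hybrid sort from latency mode into register
// pressure reduction.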
1967 bool RegReductionPQBase::MayReduceRegPressure(SUnit *SU) const {
1968 const SDNode *N = SU->getNode();
1970 if (!N->isMachineOpcode() || !SU->NumSuccs)
1971 return false;
1973 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
1974 for (unsigned i = 0; i != NumDefs; ++i) {
1975 MVT VT = N->getSimpleValueType(i);
1976 if (!N->hasAnyUseOfValue(i))
1977 continue;
1978 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
1979 if (RegPressure[RCId] >= RegLimit[RCId])
1980 return true;
1981 }
1982 return false;
1983 }
1985 // Compute the register pressure contribution by this instruction by counting
1986 // up for uses that are not live and down for defs. Only count register classes
1987 // that are already under high pressure. As a side effect, compute the number of
1988 // uses of registers that are already live.
1990 // FIXME: This encompasses the logic in HighRegPressure and MayReduceRegPressure
1991 // so could probably be factored.
1992 int RegReductionPQBase::RegPressureDiff(SUnit *SU, unsigned &LiveUses) const {
1993 LiveUses = 0;
1994 int PDiff = 0;
1995 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
1996 I != E; ++I) {
1997 if (I->isCtrl())
1998 continue;
1999 SUnit *PredSU = I->getSUnit();
2000 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
2001 // to cover the number of registers defined (they are all live).
2002 if (PredSU->NumRegDefsLeft == 0) {
2003 if (PredSU->getNode()->isMachineOpcode())
2004 ++LiveUses;
2005 continue;
2006 }
2007 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2008 RegDefPos.IsValid(); RegDefPos.Advance()) {
2009 MVT VT = RegDefPos.GetValue();
2010 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2011 if (RegPressure[RCId] >= RegLimit[RCId])
2012 ++PDiff;
2013 }
2014 }
2015 const SDNode *N = SU->getNode();
2017 if (!N || !N->isMachineOpcode() || !SU->NumSuccs)
2018 return PDiff;
2020 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2021 for (unsigned i = 0; i != NumDefs; ++i) {
2022 MVT VT = N->getSimpleValueType(i);
2023 if (!N->hasAnyUseOfValue(i))
2024 continue;
2025 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2026 if (RegPressure[RCId] >= RegLimit[RCId])
2027 --PDiff;
2028 }
2029 return PDiff;
2030 }
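// Worked example (illustrative): if both of SU's operands are defined in a
// register class already at its limit and SU defines one value in that same
// class, the operand loop counts +2 (bottom-up, scheduling SU starts those
// operand live ranges) while the def loop counts -1 (SU's def, already live,
// is completed here), for a diff of +1. In the list-ilp comparator the node
// with the larger diff is deferred in favor of a pressure-neutral or
// pressure-reducing one.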
2032 void RegReductionPQBase::scheduledNode(SUnit *SU) {
2033 if (!TracksRegPressure)
2034 return;
2036 if (!SU->getNode())
2037 return;
2039 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2040 I != E; ++I) {
2041 if (I->isCtrl())
2042 continue;
2043 SUnit *PredSU = I->getSUnit();
2044 // NumRegDefsLeft is zero when enough uses of this node have been scheduled
2045 // to cover the number of registers defined (they are all live).
2046 if (PredSU->NumRegDefsLeft == 0) {
2047 continue;
2048 }
2049 // FIXME: The ScheduleDAG currently loses information about which of a
2050 // node's values is consumed by each dependence. Consequently, if the node
2051 // defines multiple register classes, we don't know which to pressurize
2052 // here. Instead the following loop consumes the register defs in an
2053 // arbitrary order. At least it handles the common case of clustered loads
2054 // to the same class. For precise liveness, each SDep needs to indicate the
2055 // result number. But that tightly couples the ScheduleDAG with the
2056 // SelectionDAG making updates tricky. A simpler hack would be to attach a
2057 // value type or register class to SDep.
2059 // The most important aspect of register tracking is balancing the increase
2060 // here with the reduction further below. Note that this SU may use multiple
2061 // defs in PredSU. They can't be determined here, but we've already
2062 // compensated by reducing NumRegDefsLeft in PredSU during
2063 // ScheduleDAGSDNodes::AddSchedEdges.
2064 --PredSU->NumRegDefsLeft;
2065 unsigned SkipRegDefs = PredSU->NumRegDefsLeft;
2066 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(PredSU, scheduleDAG);
2067 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2068 if (SkipRegDefs)
2069 continue;
2071 unsigned RCId, Cost;
2072 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2073 RegPressure[RCId] += Cost;
2074 break;
2075 }
2076 }
2078 // We should have this assert, but there may be dead SDNodes that never
2079 // materialize as SUnits, so they don't appear to generate liveness.
2080 //assert(SU->NumRegDefsLeft == 0 && "not all regdefs have scheduled uses");
2081 int SkipRegDefs = (int)SU->NumRegDefsLeft;
2082 for (ScheduleDAGSDNodes::RegDefIter RegDefPos(SU, scheduleDAG);
2083 RegDefPos.IsValid(); RegDefPos.Advance(), --SkipRegDefs) {
2084 if (SkipRegDefs > 0)
2085 continue;
2086 unsigned RCId, Cost;
2087 GetCostForDef(RegDefPos, TLI, TII, TRI, RCId, Cost, MF);
2088 if (RegPressure[RCId] < Cost) {
2089 // Register pressure tracking is imprecise. This can happen. But we try
2090 // hard not to let it happen because it likely results in poor scheduling.
2091 DEBUG(dbgs() << " SU(" << SU->NodeNum << ") has too many regdefs\n");
2092 RegPressure[RCId] = 0;
2093 }
2094 else {
2095 RegPressure[RCId] -= Cost;
2096 }
2097 }
2098 dumpRegPressure();
2099 }
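// Illustrative trace of the bookkeeping above (bottom-up): scheduling SU as
// a use charges the cost of one of PredSU's remaining defs to RegPressure,
// because that value becomes live here; once SU itself is scheduled, each of
// its own defs whose uses have all been covered (NumRegDefsLeft exhausted)
// has a completed live range, so its cost is credited back.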
2101 void RegReductionPQBase::unscheduledNode(SUnit *SU) {
2102 if (!TracksRegPressure)
2103 return;
2105 const SDNode *N = SU->getNode();
2106 if (!N) return;
2108 if (!N->isMachineOpcode()) {
2109 if (N->getOpcode() != ISD::CopyToReg)
2110 return;
2111 } else {
2112 unsigned Opc = N->getMachineOpcode();
2113 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2114 Opc == TargetOpcode::INSERT_SUBREG ||
2115 Opc == TargetOpcode::SUBREG_TO_REG ||
2116 Opc == TargetOpcode::REG_SEQUENCE ||
2117 Opc == TargetOpcode::IMPLICIT_DEF)
2118 return;
2119 }
2121 for (SUnit::pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2122 I != E; ++I) {
2123 if (I->isCtrl())
2124 continue;
2125 SUnit *PredSU = I->getSUnit();
2126 // NumSuccsLeft counts all deps. Don't compare it with NumSuccs which only
2127 // counts data deps.
2128 if (PredSU->NumSuccsLeft != PredSU->Succs.size())
2129 continue;
2130 const SDNode *PN = PredSU->getNode();
2131 if (!PN->isMachineOpcode()) {
2132 if (PN->getOpcode() == ISD::CopyFromReg) {
2133 MVT VT = PN->getSimpleValueType(0);
2134 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2135 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2136 }
2137 continue;
2138 }
2139 unsigned POpc = PN->getMachineOpcode();
2140 if (POpc == TargetOpcode::IMPLICIT_DEF)
2141 continue;
2142 if (POpc == TargetOpcode::EXTRACT_SUBREG ||
2143 POpc == TargetOpcode::INSERT_SUBREG ||
2144 POpc == TargetOpcode::SUBREG_TO_REG) {
2145 MVT VT = PN->getSimpleValueType(0);
2146 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2147 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2148 continue;
2149 }
2150 unsigned NumDefs = TII->get(PN->getMachineOpcode()).getNumDefs();
2151 for (unsigned i = 0; i != NumDefs; ++i) {
2152 MVT VT = PN->getSimpleValueType(i);
2153 if (!PN->hasAnyUseOfValue(i))
2154 continue;
2155 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2156 if (RegPressure[RCId] < TLI->getRepRegClassCostFor(VT))
2157 // Register pressure tracking is imprecise. This can happen.
2158 RegPressure[RCId] = 0;
2159 else
2160 RegPressure[RCId] -= TLI->getRepRegClassCostFor(VT);
2161 }
2162 }
2164 // Check for isMachineOpcode() as PrescheduleNodesWithMultipleUses()
2165 // may transfer data dependencies to CopyToReg.
2166 if (SU->NumSuccs && N->isMachineOpcode()) {
2167 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2168 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2169 MVT VT = N->getSimpleValueType(i);
2170 if (VT == MVT::Glue || VT == MVT::Other)
2171 continue;
2172 if (!N->hasAnyUseOfValue(i))
2173 continue;
2174 unsigned RCId = TLI->getRepRegClassFor(VT)->getID();
2175 RegPressure[RCId] += TLI->getRepRegClassCostFor(VT);
2176 }
2177 }
2179 dumpRegPressure();
2180 }
2182 //===----------------------------------------------------------------------===//
2183 // Dynamic Node Priority for Register Pressure Reduction
2184 //===----------------------------------------------------------------------===//
2186 /// closestSucc - Returns the scheduled cycle of the successor which is
2187 /// closest to the current cycle.
2188 static unsigned closestSucc(const SUnit *SU) {
2189 unsigned MaxHeight = 0;
2190 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2191 I != E; ++I) {
2192 if (I->isCtrl()) continue;  // ignore chain succs
2193 unsigned Height = I->getSUnit()->getHeight();
2194 // If there are a bunch of CopyToRegs stacked up, they should be considered
2195 // to be at the same position.
2196 if (I->getSUnit()->getNode() &&
2197 I->getSUnit()->getNode()->getOpcode() == ISD::CopyToReg)
2198 Height = closestSucc(I->getSUnit())+1;
2199 if (Height > MaxHeight)
2200 MaxHeight = Height;
2201 }
2202 return MaxHeight;
2203 }
2205 /// calcMaxScratches - Returns a cost estimate of the worst-case requirement
2206 /// for scratch registers, i.e. number of data dependencies.
2207 static unsigned calcMaxScratches(const SUnit *SU) {
2208 unsigned Scratches = 0;
2209 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2210 I != E; ++I) {
2211 if (I->isCtrl()) continue;  // ignore chain preds
2212 Scratches++;
2213 }
2214 return Scratches;
2215 }
2217 /// hasOnlyLiveInOpers - Return true if SU has only value predecessors that are
2218 /// CopyFromReg from a virtual register.
2219 static bool hasOnlyLiveInOpers(const SUnit *SU) {
2220 bool RetVal = false;
2221 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2222 I != E; ++I) {
2223 if (I->isCtrl()) continue;
2224 const SUnit *PredSU = I->getSUnit();
2225 if (PredSU->getNode() &&
2226 PredSU->getNode()->getOpcode() == ISD::CopyFromReg) {
2227 unsigned Reg =
2228 cast<RegisterSDNode>(PredSU->getNode()->getOperand(1))->getReg();
2229 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2230 RetVal = true;
2231 continue;
2232 }
2233 }
2234 return false;
2235 }
2236 return RetVal;
2237 }
2239 /// hasOnlyLiveOutUses - Return true if SU has only value successors that are
2240 /// CopyToReg to a virtual register. This SU def is probably a liveout and
2241 /// it has no other use. It should be scheduled closer to the terminator.
2242 static bool hasOnlyLiveOutUses(const SUnit *SU) {
2243 bool RetVal = false;
2244 for (SUnit::const_succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
2245 I != E; ++I) {
2246 if (I->isCtrl()) continue;
2247 const SUnit *SuccSU = I->getSUnit();
2248 if (SuccSU->getNode() && SuccSU->getNode()->getOpcode() == ISD::CopyToReg) {
2249 unsigned Reg =
2250 cast<RegisterSDNode>(SuccSU->getNode()->getOperand(1))->getReg();
2251 if (TargetRegisterInfo::isVirtualRegister(Reg)) {
2252 RetVal = true;
2253 continue;
2254 }
2255 }
2256 return false;
2257 }
2258 return RetVal;
2259 }
2261 // Set isVRegCycle for a node with only live in opers and live out uses. Also
2262 // set isVRegCycle for its CopyFromReg operands.
2264 // This is only relevant for single-block loops, in which case the VRegCycle
2265 // node is likely an induction variable in which the operand and target virtual
2266 // registers should be coalesced (e.g. pre/post increment values). Setting the
2267 // isVRegCycle flag helps the scheduler prioritize other uses of the same
2268 // CopyFromReg so that this node becomes the virtual register "kill". This
2269 // avoids interference between the values live in and out of the block and
2270 // eliminates a copy inside the loop.
2271 static void initVRegCycle(SUnit *SU) {
2272 if (DisableSchedVRegCycle)
2273 return;
2275 if (!hasOnlyLiveInOpers(SU) || !hasOnlyLiveOutUses(SU))
2276 return;
2278 DEBUG(dbgs() << "VRegCycle: SU(" << SU->NodeNum << ")\n");
2280 SU->isVRegCycle = true;
2282 for (SUnit::const_pred_iterator I = SU->Preds.begin(), E = SU->Preds.end();
2283 I != E; ++I) {
2284 if (I->isCtrl()) continue;
2285 I->getSUnit()->isVRegCycle = true;
2286 }
2287 }
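// Illustrative pattern (hypothetical virtual register numbers): in a
// single-block loop such as
//
//   loop:
//     %i      = CopyFromReg %vreg0        ; live into the block
//     %i.next = ADD %i, 1
//               CopyToReg %vreg0, %i.next ; live out of the block
//     brcond ..., loop
//
// the ADD has only live-in operands and live-out uses, so it and its
// CopyFromReg operand are flagged isVRegCycle. Other uses of %i are then
// scheduled ahead of the ADD, making the ADD the kill of %i so that %vreg0's
// incoming and outgoing values can be coalesced without a copy in the loop.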
2289 // After scheduling the definition of a VRegCycle, clear the isVRegCycle flag of
2290 // CopyFromReg operands. We should no longer penalize other uses of this VReg.
2291 static void resetVRegCycle(SUnit *SU) {
2292 if (!SU->isVRegCycle)
2293 return;
2295 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2296 I != E; ++I) {
2297 if (I->isCtrl()) continue;  // ignore chain preds
2298 SUnit *PredSU = I->getSUnit();
2299 if (PredSU->isVRegCycle) {
2300 assert(PredSU->getNode()->getOpcode() == ISD::CopyFromReg &&
2301 "VRegCycle def must be CopyFromReg");
2302 I->getSUnit()->isVRegCycle = 0;
2303 }
2304 }
2305 }
2307 // Return true if this SUnit uses a CopyFromReg node marked as a VRegCycle. This
2308 // means a node that defines the VRegCycle has not been scheduled yet.
2309 static bool hasVRegCycleUse(const SUnit *SU) {
2310 // If this SU also defines the VReg, don't hoist it as a "use".
2311 if (SU->isVRegCycle)
2312 return false;
2314 for (SUnit::const_pred_iterator I = SU->Preds.begin(),E = SU->Preds.end();
2315 I != E; ++I) {
2316 if (I->isCtrl()) continue;  // ignore chain preds
2317 if (I->getSUnit()->isVRegCycle &&
2318 I->getSUnit()->getNode()->getOpcode() == ISD::CopyFromReg) {
2319 DEBUG(dbgs() << "  VReg cycle use: SU (" << SU->NodeNum << ")\n");
2320 return true;
2321 }
2322 }
2323 return false;
2324 }
2326 // Check for either a dependence (latency) or resource (hazard) stall.
2328 // Note: The ScheduleHazardRecognizer interface requires a non-const SU.
2329 static bool BUHasStall(SUnit *SU, int Height, RegReductionPQBase *SPQ) {
2330 if ((int)SPQ->getCurCycle() < Height) return true;
2331 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2332 != ScheduleHazardRecognizer::NoHazard)
2333 return true;
2334 return false;
2335 }
2337 // Return -1 if left has higher priority, 1 if right has higher priority.
2338 // Return 0 if latency-based priority is equivalent.
2339 static int BUCompareLatency(SUnit *left, SUnit *right, bool checkPref,
2340 RegReductionPQBase *SPQ) {
2341 // Scheduling an instruction that uses a VReg whose postincrement has not yet
2342 // been scheduled will induce a copy. Model this as an extra cycle of latency.
2343 int LPenalty = hasVRegCycleUse(left) ? 1 : 0;
2344 int RPenalty = hasVRegCycleUse(right) ? 1 : 0;
2345 int LHeight = (int)left->getHeight() + LPenalty;
2346 int RHeight = (int)right->getHeight() + RPenalty;
2348 bool LStall = (!checkPref || left->SchedulingPref == Sched::ILP) &&
2349 BUHasStall(left, LHeight, SPQ);
2350 bool RStall = (!checkPref || right->SchedulingPref == Sched::ILP) &&
2351 BUHasStall(right, RHeight, SPQ);
2353 // If scheduling one of the nodes will cause a pipeline stall, delay it.
2354 // If scheduling either one of the nodes will cause a pipeline stall, sort
2355 // them according to their height.
2356 if (LStall) {
2357 if (!RStall)
2358 return 1;
2359 if (LHeight != RHeight)
2360 return LHeight > RHeight ? 1 : -1;
2361 } else if (RStall)
2362 return -1;
2364 // If either node is scheduling for latency, sort them by height/depth
2365 // and latency.
2366 if (!checkPref || (left->SchedulingPref == Sched::ILP ||
2367 right->SchedulingPref == Sched::ILP)) {
2368 // If neither instruction stalls (!LStall && !RStall) and HazardRecognizer
2369 // is enabled, grouping instructions by cycle, then its height is already
2370 // covered so only its depth matters. We also reach this point if both stall
2371 // but have the same height.
2372 if (!SPQ->getHazardRec()->isEnabled()) {
2373 if (LHeight != RHeight)
2374 return LHeight > RHeight ? 1 : -1;
2375 }
2376 int LDepth = left->getDepth() - LPenalty;
2377 int RDepth = right->getDepth() - RPenalty;
2378 if (LDepth != RDepth) {
2379 DEBUG(dbgs() << " Comparing latency of SU (" << left->NodeNum
2380 << ") depth " << LDepth << " vs SU (" << right->NodeNum
2381 << ") depth " << RDepth << "\n");
2382 return LDepth < RDepth ? 1 : -1;
2384 if (left->Latency != right->Latency)
2385 return left->Latency > right->Latency ? 1 : -1;
2386 }
2387 return 0;
2388 }
2390 static bool BURRSort(SUnit *left, SUnit *right, RegReductionPQBase *SPQ) {
2391 // Schedule physical register definitions close to their use. This is
2392 // motivated by microarchitectures that can fuse cmp+jump macro-ops. But as
2393 // long as shortening physreg live ranges is generally good, we can defer
2394 // creating a subtarget hook.
2395 if (!DisableSchedPhysRegJoin) {
2396 bool LHasPhysReg = left->hasPhysRegDefs;
2397 bool RHasPhysReg = right->hasPhysRegDefs;
2398 if (LHasPhysReg != RHasPhysReg) {
2399 #ifndef NDEBUG
2400 static const char *const PhysRegMsg[] = { " has no physreg",
2401 " defines a physreg" };
2402 #endif
2403 DEBUG(dbgs() << "  SU (" << left->NodeNum << ") "
2404 << PhysRegMsg[LHasPhysReg] << " SU(" << right->NodeNum << ") "
2405 << PhysRegMsg[RHasPhysReg] << "\n");
2406 return LHasPhysReg < RHasPhysReg;
2407 }
2408 }
2410 // Prioritize by Sethi-Ullman number and push CopyToReg nodes down.
2411 unsigned LPriority = SPQ->getNodePriority(left);
2412 unsigned RPriority = SPQ->getNodePriority(right);
2414 // Be really careful about hoisting call operands above previous calls.
2415 // Only allow it if it would reduce register pressure.
2416 if (left->isCall && right->isCallOp) {
2417 unsigned RNumVals = right->getNode()->getNumValues();
2418 RPriority = (RPriority > RNumVals) ? (RPriority - RNumVals) : 0;
2419 }
2420 if (right->isCall && left->isCallOp) {
2421 unsigned LNumVals = left->getNode()->getNumValues();
2422 LPriority = (LPriority > LNumVals) ? (LPriority - LNumVals) : 0;
2423 }
2425 if (LPriority != RPriority)
2426 return LPriority > RPriority;
2428 // If one or both of the nodes are calls and their Sethi-Ullman numbers are
2429 // the same, then keep the source order.
2430 if (left->isCall || right->isCall) {
2431 unsigned LOrder = SPQ->getNodeOrdering(left);
2432 unsigned ROrder = SPQ->getNodeOrdering(right);
2434 // Prefer an ordering where the lower the non-zero order number, the higher
2435 // the priority.
2436 if ((LOrder || ROrder) && LOrder != ROrder)
2437 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2438 }
2440 // Try schedule def + use closer when Sethi-Ullman numbers are the same.
2441 // For example:
2442 //     t1 = op t2, c1
2443 //     t3 = op t4, c2
2444 //
2445 // and the following instructions are both ready.
2446 //     t2 = op c3
2447 //     t4 = op c4
2448 //
2449 // Then schedule t2 = op first.
2450 // i.e.
2451 //     t4 = op c4
2452 //     t1 = op t2, c1
2453 //     t2 = op c3
2454 //     t3 = op t4, c2
2455 //
2456 // This creates more short live intervals.
2457 unsigned LDist = closestSucc(left);
2458 unsigned RDist = closestSucc(right);
2459 if (LDist != RDist)
2460 return LDist < RDist;
2462 // How many registers become live when the node is scheduled.
2463 unsigned LScratch = calcMaxScratches(left);
2464 unsigned RScratch = calcMaxScratches(right);
2465 if (LScratch != RScratch)
2466 return LScratch > RScratch;
2468 // Comparing latency against a call makes little sense unless the node
2469 // is register pressure-neutral.
2470 if ((left->isCall && RPriority > 0) || (right->isCall && LPriority > 0))
2471 return (left->NodeQueueId > right->NodeQueueId);
2473 // Do not compare latencies when one or both of the nodes are calls.
2474 if (!DisableSchedCycles &&
2475 !(left->isCall || right->isCall)) {
2476 int result = BUCompareLatency(left, right, false /*checkPref*/, SPQ);
2477 if (result != 0)
2478 return result > 0;
2479 }
2480 else {
2481 if (left->getHeight() != right->getHeight())
2482 return left->getHeight() > right->getHeight();
2484 if (left->getDepth() != right->getDepth())
2485 return left->getDepth() < right->getDepth();
2486 }
2488 assert(left->NodeQueueId && right->NodeQueueId &&
2489 "NodeQueueId cannot be zero");
2490 return (left->NodeQueueId > right->NodeQueueId);
2491 }
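// Worked tie-break example (illustrative): with equal Sethi-Ullman
// priorities and no physreg defs, suppose closestSucc(left) == 2 while
// closestSucc(right) == 5, i.e. right's nearest consumer was scheduled more
// recently in this bottom-up order. LDist < RDist makes the comparator
// return true, so right is picked first and its def lands directly above the
// use just emitted -- the short live interval the t1..t4 example describes.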
2494 bool bu_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2495 if (int res = checkSpecialNodes(left, right))
2496 return res > 0;
2498 return BURRSort(left, right, SPQ);
2499 }
2501 // Source order, otherwise bottom up.
2502 bool src_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2503 if (int res = checkSpecialNodes(left, right))
2504 return res > 0;
2506 unsigned LOrder = SPQ->getNodeOrdering(left);
2507 unsigned ROrder = SPQ->getNodeOrdering(right);
2509 // Prefer an ordering where the lower the non-zero order number, the higher
2510 // the priority.
2511 if ((LOrder || ROrder) && LOrder != ROrder)
2512 return LOrder != 0 && (LOrder < ROrder || ROrder == 0);
2514 return BURRSort(left, right, SPQ);
2515 }
2517 // If the time between now and when the instruction will be ready can cover
2518 // the spill code, then avoid adding it to the ready queue. This gives long
2519 // stalls highest priority and allows hoisting across calls. It should also
2520 // speed up processing the available queue.
2521 bool hybrid_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2522 static const unsigned ReadyDelay = 3;
2524 if (SPQ->MayReduceRegPressure(SU)) return true;
2526 if (SU->getHeight() > (CurCycle + ReadyDelay)) return false;
2528 if (SPQ->getHazardRec()->getHazardType(SU, -ReadyDelay)
2529 != ScheduleHazardRecognizer::NoHazard)
2530 return false;
2532 return true;
2533 }
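// Worked example (illustrative): with ReadyDelay == 3 and CurCycle == 10, an
// SU of height 14 is kept off the available queue (14 > 10 + 3): the stall
// is long enough to hide spill code, so pressure-reducing work can be
// scheduled meanwhile. An SU of height 13 is queued unless the hazard
// recognizer reports a hazard three cycles out, and any SU for which
// MayReduceRegPressure holds bypasses the delay entirely.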
2535 // Return true if right should be scheduled with higher priority than left.
2536 bool hybrid_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2537 if (int res = checkSpecialNodes(left, right))
2538 return res > 0;
2540 if (left->isCall || right->isCall)
2541 // No way to compute latency of calls.
2542 return BURRSort(left, right, SPQ);
2544 bool LHigh = SPQ->HighRegPressure(left);
2545 bool RHigh = SPQ->HighRegPressure(right);
2546 // Avoid causing spills. If register pressure is high, schedule for
2547 // register pressure reduction.
2548 if (LHigh && !RHigh) {
2549 DEBUG(dbgs() << "  pressure SU(" << left->NodeNum << ") > SU("
2550 << right->NodeNum << ")\n");
2551 return true;
2552 }
2553 else if (!LHigh && RHigh) {
2554 DEBUG(dbgs() << "  pressure SU(" << right->NodeNum << ") > SU("
2555 << left->NodeNum << ")\n");
2556 return false;
2557 }
2558 if (!LHigh && !RHigh) {
2559 int result = BUCompareLatency(left, right, true /*checkPref*/, SPQ);
2560 if (result != 0)
2561 return result > 0;
2562 }
2563 return BURRSort(left, right, SPQ);
2564 }
2566 // Schedule as many instructions in each cycle as possible. So don't make an
2567 // instruction available unless it is ready in the current cycle.
2568 bool ilp_ls_rr_sort::isReady(SUnit *SU, unsigned CurCycle) const {
2569 if (SU->getHeight() > CurCycle) return false;
2571 if (SPQ->getHazardRec()->getHazardType(SU, 0)
2572 != ScheduleHazardRecognizer::NoHazard)
2573 return false;
2575 return true;
2576 }
2578 static bool canEnableCoalescing(SUnit *SU) {
2579 unsigned Opc = SU->getNode() ? SU->getNode()->getOpcode() : 0;
2580 if (Opc == ISD::TokenFactor || Opc == ISD::CopyToReg)
2581 // CopyToReg should be close to its uses to facilitate coalescing and
2582 // avoid spilling.
2583 return true;
2585 if (Opc == TargetOpcode::EXTRACT_SUBREG ||
2586 Opc == TargetOpcode::SUBREG_TO_REG ||
2587 Opc == TargetOpcode::INSERT_SUBREG)
2588 // EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG nodes should be
2589 // close to their uses to facilitate coalescing.
2590 return true;
2592 if (SU->NumPreds == 0 && SU->NumSuccs != 0)
2593 // If SU does not have a register def, schedule it close to its uses
2594 // because it does not lengthen any live ranges.
2595 return true;
2597 return false;
2598 }
2600 // list-ilp is currently an experimental scheduler that allows various
2601 // heuristics to be enabled prior to the normal register reduction logic.
2602 bool ilp_ls_rr_sort::operator()(SUnit *left, SUnit *right) const {
2603 if (int res = checkSpecialNodes(left, right))
2604 return res > 0;
2606 if (left->isCall || right->isCall)
2607 // No way to compute latency of calls.
2608 return BURRSort(left, right, SPQ);
2610 unsigned LLiveUses = 0, RLiveUses = 0;
2611 int LPDiff = 0, RPDiff = 0;
2612 if (!DisableSchedRegPressure || !DisableSchedLiveUses) {
2613 LPDiff = SPQ->RegPressureDiff(left, LLiveUses);
2614 RPDiff = SPQ->RegPressureDiff(right, RLiveUses);
2616 if (!DisableSchedRegPressure && LPDiff != RPDiff) {
2617 DEBUG(dbgs() << "RegPressureDiff SU(" << left->NodeNum << "): " << LPDiff
2618 << " != SU(" << right->NodeNum << "): " << RPDiff << "\n");
2619 return LPDiff > RPDiff;
2622 if (!DisableSchedRegPressure && (LPDiff > 0 || RPDiff > 0)) {
2623 bool LReduce = canEnableCoalescing(left);
2624 bool RReduce = canEnableCoalescing(right);
2625 if (LReduce && !RReduce) return false;
2626 if (RReduce && !LReduce) return true;
2627 }
2629 if (!DisableSchedLiveUses && (LLiveUses != RLiveUses)) {
2630 DEBUG(dbgs() << "Live uses SU(" << left->NodeNum << "): " << LLiveUses
2631 << " != SU(" << right->NodeNum << "): " << RLiveUses << "\n");
2632 return LLiveUses < RLiveUses;
2633 }
2635 if (!DisableSchedStalls) {
2636 bool LStall = BUHasStall(left, left->getHeight(), SPQ);
2637 bool RStall = BUHasStall(right, right->getHeight(), SPQ);
2638 if (LStall != RStall)
2639 return left->getHeight() > right->getHeight();
2640 }
2642 if (!DisableSchedCriticalPath) {
2643 int spread = (int)left->getDepth() - (int)right->getDepth();
2644 if (std::abs(spread) > MaxReorderWindow) {
2645 DEBUG(dbgs() << "Depth of SU(" << left->NodeNum << "): "
2646 << left->getDepth() << " != SU(" << right->NodeNum << "): "
2647 << right->getDepth() << "\n");
2648 return left->getDepth() < right->getDepth();
2649 }
2650 }
2652 if (!DisableSchedHeight && left->getHeight() != right->getHeight()) {
2653 int spread = (int)left->getHeight() - (int)right->getHeight();
2654 if (std::abs(spread) > MaxReorderWindow)
2655 return left->getHeight() > right->getHeight();
2656 }
2658 return BURRSort(left, right, SPQ);
2659 }
2661 void RegReductionPQBase::initNodes(std::vector<SUnit> &sunits) {
2662 SUnits = &sunits;
2663 // Add pseudo dependency edges for two-address nodes.
2664 if (!Disable2AddrHack)
2665 AddPseudoTwoAddrDeps();
2666 // Reroute edges to nodes with multiple uses.
2667 if (!TracksRegPressure && !SrcOrder)
2668 PrescheduleNodesWithMultipleUses();
2669 // Calculate node priorities.
2670 CalculateSethiUllmanNumbers();
2672 // For single block loops, mark nodes that look like canonical IV increments.
2673 if (scheduleDAG->BB->isSuccessor(scheduleDAG->BB)) {
2674 for (unsigned i = 0, e = sunits.size(); i != e; ++i) {
2675 initVRegCycle(&sunits[i]);
2676 }
2677 }
2678 }
2680 //===----------------------------------------------------------------------===//
2681 // Preschedule for Register Pressure
2682 //===----------------------------------------------------------------------===//
2684 bool RegReductionPQBase::canClobber(const SUnit *SU, const SUnit *Op) {
2685 if (SU->isTwoAddress) {
2686 unsigned Opc = SU->getNode()->getMachineOpcode();
2687 const MCInstrDesc &MCID = TII->get(Opc);
2688 unsigned NumRes = MCID.getNumDefs();
2689 unsigned NumOps = MCID.getNumOperands() - NumRes;
2690 for (unsigned i = 0; i != NumOps; ++i) {
2691 if (MCID.getOperandConstraint(i+NumRes, MCOI::TIED_TO) != -1) {
2692 SDNode *DU = SU->getNode()->getOperand(i).getNode();
2693 if (DU->getNodeId() != -1 &&
2694 Op->OrigNode == &(*SUnits)[DU->getNodeId()])
2695 return true;
2696 }
2697 }
2698 }
2699 return false;
2700 }
2702 /// canClobberReachingPhysRegUse - True if SU would clobber one of its
2703 /// successors' explicit physregs whose definition can reach DepSU.
2704 /// i.e. DepSU should not be scheduled above SU.
2705 static bool canClobberReachingPhysRegUse(const SUnit *DepSU, const SUnit *SU,
2706 ScheduleDAGRRList *scheduleDAG,
2707 const TargetInstrInfo *TII,
2708 const TargetRegisterInfo *TRI) {
2709 const uint16_t *ImpDefs
2710 = TII->get(SU->getNode()->getMachineOpcode()).getImplicitDefs();
2711 const uint32_t *RegMask = getNodeRegMask(SU->getNode());
2712 if (!ImpDefs && !RegMask)
2713 return false;
2715 for (SUnit::const_succ_iterator SI = SU->Succs.begin(), SE = SU->Succs.end();
2716 SI != SE; ++SI) {
2717 SUnit *SuccSU = SI->getSUnit();
2718 for (SUnit::const_pred_iterator PI = SuccSU->Preds.begin(),
2719 PE = SuccSU->Preds.end(); PI != PE; ++PI) {
2720 if (!PI->isAssignedRegDep())
2721 continue;
2723 if (RegMask && MachineOperand::clobbersPhysReg(RegMask, PI->getReg()) &&
2724 scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2725 return true;
2727 if (ImpDefs)
2728 for (const uint16_t *ImpDef = ImpDefs; *ImpDef; ++ImpDef)
2729 // Return true if SU clobbers this physical register use and the
2730 // definition of the register reaches from DepSU. IsReachable queries
2731 // a topological forward sort of the DAG (following the successors).
2732 if (TRI->regsOverlap(*ImpDef, PI->getReg()) &&
2733 scheduleDAG->IsReachable(DepSU, PI->getSUnit()))
2734 return true;
2735 }
2736 }
2737 return false;
2738 }
2740 /// canClobberPhysRegDefs - True if SU would clobber one of SuccSU's
2741 /// physical register defs.
2742 static bool canClobberPhysRegDefs(const SUnit *SuccSU, const SUnit *SU,
2743 const TargetInstrInfo *TII,
2744 const TargetRegisterInfo *TRI) {
2745 SDNode *N = SuccSU->getNode();
2746 unsigned NumDefs = TII->get(N->getMachineOpcode()).getNumDefs();
2747 const uint16_t *ImpDefs = TII->get(N->getMachineOpcode()).getImplicitDefs();
2748 assert(ImpDefs && "Caller should check hasPhysRegDefs");
2749 for (const SDNode *SUNode = SU->getNode(); SUNode;
2750 SUNode = SUNode->getGluedNode()) {
2751 if (!SUNode->isMachineOpcode())
2753 const uint16_t *SUImpDefs =
2754 TII->get(SUNode->getMachineOpcode()).getImplicitDefs();
2755 const uint32_t *SURegMask = getNodeRegMask(SUNode);
2756 if (!SUImpDefs && !SURegMask)
2757 continue;
2758 for (unsigned i = NumDefs, e = N->getNumValues(); i != e; ++i) {
2759 EVT VT = N->getValueType(i);
2760 if (VT == MVT::Glue || VT == MVT::Other)
2761 continue;
2762 if (!N->hasAnyUseOfValue(i))
2763 continue;
2764 unsigned Reg = ImpDefs[i - NumDefs];
2765 if (SURegMask && MachineOperand::clobbersPhysReg(SURegMask, Reg))
2766 return true;
2767 if (!SUImpDefs)
2768 continue;
2769 for (;*SUImpDefs; ++SUImpDefs) {
2770 unsigned SUReg = *SUImpDefs;
2771 if (TRI->regsOverlap(Reg, SUReg))
2772 return true;
2773 }
2774 }
2775 }
2776 return false;
2777 }
2779 /// PrescheduleNodesWithMultipleUses - Nodes with multiple uses
2780 /// are not handled well by the general register pressure reduction
2781 /// heuristics. When presented with code like this:
2782 ///
2783 ///      N
2784 ///    / |
2785 ///   /  |
2786 ///  U  store
2787 ///  |
2788 /// ...
2789 ///
2790 /// the heuristics tend to push the store up, but since the
2791 /// operand of the store has another use (U), this would increase
2792 /// the length of that other use (the U->N edge).
2793 ///
2794 /// This function transforms code like the above to route U's
2795 /// dependence through the store when possible, like this:
2796 ///
2797 ///      N
2798 ///      ||
2799 ///      ||
2800 ///     store
2801 ///       |
2802 ///       U
2803 ///       |
2804 ///      ...
2805 ///
2806 /// This results in the store being scheduled immediately
2807 /// after N, which shortens the U->N live range, reducing
2808 /// register pressure.
2810 void RegReductionPQBase::PrescheduleNodesWithMultipleUses() {
2811 // Visit all the nodes in topological order, working top-down.
2812 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2813 SUnit *SU = &(*SUnits)[i];
2814 // For now, only look at nodes with no data successors, such as stores.
2815 // These are especially important, due to the heuristics in
2816 // getNodePriority for nodes with no data successors.
2817 if (SU->NumSuccs != 0)
2818 continue;
2819 // For now, only look at nodes with exactly one data predecessor.
2820 if (SU->NumPreds != 1)
2821 continue;
2822 // Avoid prescheduling copies to virtual registers, which don't behave
2823 // like other nodes from the perspective of scheduling heuristics.
2824 if (SDNode *N = SU->getNode())
2825 if (N->getOpcode() == ISD::CopyToReg &&
2826 TargetRegisterInfo::isVirtualRegister
2827 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2828 continue;
2830 // Locate the single data predecessor.
2831 SUnit *PredSU = nullptr;
2832 for (SUnit::const_pred_iterator II = SU->Preds.begin(),
2833 EE = SU->Preds.end(); II != EE; ++II)
2834 if (!II->isCtrl()) {
2835 PredSU = II->getSUnit();
2836 break;
2837 }
2838 assert(PredSU);
2840 // Don't rewrite edges that carry physregs, because that requires additional
2841 // support infrastructure.
2842 if (PredSU->hasPhysRegDefs)
2843 continue;
2844 // Short-circuit the case where SU is PredSU's only data successor.
2845 if (PredSU->NumSuccs == 1)
2846 continue;
2847 // Avoid prescheduling to copies from virtual registers, which don't behave
2848 // like other nodes from the perspective of scheduling heuristics.
2849 if (SDNode *N = SU->getNode())
2850 if (N->getOpcode() == ISD::CopyFromReg &&
2851 TargetRegisterInfo::isVirtualRegister
2852 (cast<RegisterSDNode>(N->getOperand(1))->getReg()))
2853 continue;
2855 // Perform checks on the successors of PredSU.
2856 for (SUnit::const_succ_iterator II = PredSU->Succs.begin(),
2857 EE = PredSU->Succs.end(); II != EE; ++II) {
2858 SUnit *PredSuccSU = II->getSUnit();
2859 if (PredSuccSU == SU) continue;
2860 // If PredSU has another successor with no data successors, for
2861 // now don't attempt to choose either over the other.
2862 if (PredSuccSU->NumSuccs == 0)
2863 goto outer_loop_continue;
2864 // Don't break physical register dependencies.
2865 if (SU->hasPhysRegClobbers && PredSuccSU->hasPhysRegDefs)
2866 if (canClobberPhysRegDefs(PredSuccSU, SU, TII, TRI))
2867 goto outer_loop_continue;
2868 // Don't introduce graph cycles.
2869 if (scheduleDAG->IsReachable(SU, PredSuccSU))
2870 goto outer_loop_continue;
2871 }
2873 // Ok, the transformation is safe and the heuristics suggest it is
2874 // profitable. Update the graph.
2875 DEBUG(dbgs() << " Prescheduling SU #" << SU->NodeNum
2876 << " next to PredSU #" << PredSU->NodeNum
2877 << " to guide scheduling in the presence of multiple uses\n");
2878 for (unsigned i = 0; i != PredSU->Succs.size(); ++i) {
2879 SDep Edge = PredSU->Succs[i];
2880 assert(!Edge.isAssignedRegDep());
2881 SUnit *SuccSU = Edge.getSUnit();
2882 if (SuccSU != SU) {
2883 Edge.setSUnit(PredSU);
2884 scheduleDAG->RemovePred(SuccSU, Edge);
2885 scheduleDAG->AddPred(SU, Edge);
2886 Edge.setSUnit(SU);
2887 scheduleDAG->AddPred(SuccSU, Edge);
2888 --i;
2889 }
2890 }
2891 outer_loop_continue:;
2892 }
2893 }
2895 /// AddPseudoTwoAddrDeps - If two nodes share an operand and one of them uses
2896 /// it as a def&use operand, add a pseudo control edge from it to the other
2897 /// node (if it won't create a cycle) so the two-address one will be scheduled
2898 /// first (lower in the schedule). If both nodes are two-address, favor the
2899 /// one that has a CopyToReg use (more likely to be a loop induction update).
2900 /// If both are two-address, but one is commutable while the other is not
2901 /// commutable, favor the one that's not commutable.
2902 void RegReductionPQBase::AddPseudoTwoAddrDeps() {
2903 for (unsigned i = 0, e = SUnits->size(); i != e; ++i) {
2904 SUnit *SU = &(*SUnits)[i];
2905 if (!SU->isTwoAddress)
2906 continue;
2908 SDNode *Node = SU->getNode();
2909 if (!Node || !Node->isMachineOpcode() || SU->getNode()->getGluedNode())
2910 continue;
2912 bool isLiveOut = hasOnlyLiveOutUses(SU);
2913 unsigned Opc = Node->getMachineOpcode();
2914 const MCInstrDesc &MCID = TII->get(Opc);
2915 unsigned NumRes = MCID.getNumDefs();
2916 unsigned NumOps = MCID.getNumOperands() - NumRes;
2917 for (unsigned j = 0; j != NumOps; ++j) {
2918 if (MCID.getOperandConstraint(j+NumRes, MCOI::TIED_TO) == -1)
2919 continue;
2920 SDNode *DU = SU->getNode()->getOperand(j).getNode();
2921 if (DU->getNodeId() == -1)
2922 continue;
2923 const SUnit *DUSU = &(*SUnits)[DU->getNodeId()];
2924 if (!DUSU) continue;
2925 for (SUnit::const_succ_iterator I = DUSU->Succs.begin(),
2926 E = DUSU->Succs.end(); I != E; ++I) {
2927 if (I->isCtrl()) continue;
2928 SUnit *SuccSU = I->getSUnit();
2929 if (SuccSU == SU)
2930 continue;
2931 // Be conservative. Ignore if nodes aren't at roughly the same
2932 // depth and height.
2933 if (SuccSU->getHeight() < SU->getHeight() &&
2934 (SU->getHeight() - SuccSU->getHeight()) > 1)
2935 continue;
2936 // Skip past COPY_TO_REGCLASS nodes, so that the pseudo edge
2937 // constrains whatever is using the copy, instead of the copy
2938 // itself. In the case that the copy is coalesced, this
2939 // preserves the intent of the pseudo two-address heuristics.
2940 while (SuccSU->Succs.size() == 1 &&
2941 SuccSU->getNode()->isMachineOpcode() &&
2942 SuccSU->getNode()->getMachineOpcode() ==
2943 TargetOpcode::COPY_TO_REGCLASS)
2944 SuccSU = SuccSU->Succs.front().getSUnit();
2945 // Don't constrain non-instruction nodes.
2946 if (!SuccSU->getNode() || !SuccSU->getNode()->isMachineOpcode())
2947 continue;
2948 // Don't constrain nodes with physical register defs if the
2949 // predecessor can clobber them.
2950 if (SuccSU->hasPhysRegDefs && SU->hasPhysRegClobbers) {
2951 if (canClobberPhysRegDefs(SuccSU, SU, TII, TRI))
2952 continue;
2953 }
2954 // Don't constrain EXTRACT_SUBREG, INSERT_SUBREG, and SUBREG_TO_REG;
2955 // these may be coalesced away. We want them close to their uses.
2956 unsigned SuccOpc = SuccSU->getNode()->getMachineOpcode();
2957 if (SuccOpc == TargetOpcode::EXTRACT_SUBREG ||
2958 SuccOpc == TargetOpcode::INSERT_SUBREG ||
2959 SuccOpc == TargetOpcode::SUBREG_TO_REG)
2960 continue;
2961 if (!canClobberReachingPhysRegUse(SuccSU, SU, scheduleDAG, TII, TRI) &&
2962 (!canClobber(SuccSU, DUSU) ||
2963 (isLiveOut && !hasOnlyLiveOutUses(SuccSU)) ||
2964 (!SU->isCommutable && SuccSU->isCommutable)) &&
2965 !scheduleDAG->IsReachable(SuccSU, SU)) {
2966 DEBUG(dbgs() << " Adding a pseudo-two-addr edge from SU #"
2967 << SU->NodeNum << " to SU #" << SuccSU->NodeNum << "\n");
2968 scheduleDAG->AddPred(SU, SDep(SuccSU, SDep::Artificial));
2969 }
2970 }
2971 }
2972 }
2973 }
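// Illustrative (hypothetical values): given
//
//   t2 = ADD t1, c1    ; two-address, so t2 is tied to t1 and the ADD
//                      ; effectively clobbers t1
//   t4 = SUB t1, c2    ; another use of t1
//
// the loop above adds an artificial edge making the SUB a predecessor of the
// ADD (when that creates no cycle), so the SUB ends up above the ADD and
// reads t1 before the tied def overwrites it; otherwise the register
// allocator would need an extra copy of t1.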
2975 //===----------------------------------------------------------------------===//
2976 // Public Constructor Functions
2977 //===----------------------------------------------------------------------===//
2979 llvm::ScheduleDAGSDNodes *
2980 llvm::createBURRListDAGScheduler(SelectionDAGISel *IS,
2981 CodeGenOpt::Level OptLevel) {
2982 const TargetMachine &TM = IS->TM;
2983 const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
2984 const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
2986 BURegReductionPriorityQueue *PQ =
2987 new BURegReductionPriorityQueue(*IS->MF, false, false, TII, TRI, nullptr);
2988 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
2989 PQ->setScheduleDAG(SD);
2990 return SD;
2991 }
2993 llvm::ScheduleDAGSDNodes *
2994 llvm::createSourceListDAGScheduler(SelectionDAGISel *IS,
2995 CodeGenOpt::Level OptLevel) {
2996 const TargetMachine &TM = IS->TM;
2997 const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
2998 const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
3000 SrcRegReductionPriorityQueue *PQ =
3001 new SrcRegReductionPriorityQueue(*IS->MF, false, true, TII, TRI, nullptr);
3002 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, false, PQ, OptLevel);
3003 PQ->setScheduleDAG(SD);
3004 return SD;
3005 }
3007 llvm::ScheduleDAGSDNodes *
3008 llvm::createHybridListDAGScheduler(SelectionDAGISel *IS,
3009 CodeGenOpt::Level OptLevel) {
3010 const TargetMachine &TM = IS->TM;
3011 const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
3012 const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
3013 const TargetLowering *TLI = IS->getTargetLowering();
3015 HybridBURRPriorityQueue *PQ =
3016 new HybridBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
3018 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3019 PQ->setScheduleDAG(SD);
3020 return SD;
3021 }
3023 llvm::ScheduleDAGSDNodes *
3024 llvm::createILPListDAGScheduler(SelectionDAGISel *IS,
3025 CodeGenOpt::Level OptLevel) {
3026 const TargetMachine &TM = IS->TM;
3027 const TargetInstrInfo *TII = TM.getSubtargetImpl()->getInstrInfo();
3028 const TargetRegisterInfo *TRI = TM.getSubtargetImpl()->getRegisterInfo();
3029 const TargetLowering *TLI = IS->getTargetLowering();
3031 ILPBURRPriorityQueue *PQ =
3032 new ILPBURRPriorityQueue(*IS->MF, true, false, TII, TRI, TLI);
3033 ScheduleDAGRRList *SD = new ScheduleDAGRRList(*IS->MF, true, PQ, OptLevel);
3034 PQ->setScheduleDAG(SD);
3035 return SD;
3036 }
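// Usage note (illustrative): these factories are normally reached through
// the RegisterScheduler entries registered at the top of this file, e.g. by
// passing -pre-RA-sched=list-burr (or source, list-hybrid, list-ilp) to llc,
// or implicitly when a target's scheduling preference selects the hybrid or
// ILP scheduler during SelectionDAG instruction selection.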