//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include <map>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);

static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable simple hazard-avoidance"),
                      cl::init(true), cl::Hidden);
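
// Note: both flags above are cl::Hidden (omitted from -help) but are still
// accepted on the llc command line, e.g. -break-anti-dependencies=false or
// -avoid-hazards=false, which is handy when isolating scheduling problems.
// (Usage note, not from the original sources.)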

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
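
    // (Invariant, checked by asserts throughout this file: for any register,
    // exactly one of KillIndices[Reg] and DefIndices[Reg] is ~0u; the register
    // is live at the current point of the bottom-up scan exactly when
    // KillIndices[Reg] != ~0u.)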

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
  };

  /// SimpleHazardRecognizer - A *very* simple hazard recognizer. It uses
  /// a coarse classification and attempts to avoid grouping instructions
  /// of a given class too densely together.
  class SimpleHazardRecognizer : public ScheduleHazardRecognizer {
    /// Class - A simple classification for SUnits.
    enum Class {
      Other, Load, Store
    };

    /// Window - The Class values of the most recently issued
    /// instructions.
    Class Window[8];

    /// getClass - Classify the given SUnit.
    Class getClass(const SUnit *SU) {
      const MachineInstr *MI = SU->getInstr();
      const TargetInstrDesc &TID = MI->getDesc();
      if (TID.mayLoad())
        return Load;
      if (TID.mayStore())
        return Store;
      return Other;
    }

    /// Step - Rotate the existing entries in Window and insert the
    /// given class value in position as the most recent.
    void Step(Class C) {
      std::copy(Window+1, array_endof(Window), Window);
      Window[array_lengthof(Window)-1] = C;
    }

  public:
    SimpleHazardRecognizer() : Window() {}

    virtual HazardType getHazardType(SUnit *SU) {
      Class C = getClass(SU);
      if (C == Other)
        return NoHazard;
      unsigned Score = 0;
      for (unsigned i = 0; i != array_lengthof(Window); ++i)
        if (Window[i] == C)
          Score += i + 1;
      if (Score > array_lengthof(Window) * 2)
        return Hazard;
      return NoHazard;
    }
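
    // (Worked example of the scoring above: slot i of the 8-entry Window
    // contributes i+1 to the score, so more recently issued instructions
    // weigh more. If the four most recent instructions were loads
    // (slots 4..7), a new load scores 5+6+7+8 = 26 > 8*2 = 16 and is
    // reported as a Hazard.)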

    virtual void EmitInstruction(SUnit *SU) {
      Step(getClass(SU));
    }

    virtual void AdvanceCycle() {
      Step(Other);
    }
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
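  // (For example, on X86 this makes every push and pop a scheduling
  // boundary, since they modify the stack pointer. Illustrative note,
  // not from the original sources.)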
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DOUT << "PostRAScheduler\n";

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
                                 new SimpleHazardRecognizer() :
                                 new ScheduleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
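    // (For example, a block of the form "insts A; LABEL; insts B; TERM" is
    // scheduled as separate regions: first B, between the label and the
    // terminator, then A, from the block start to the label. The boundary
    // instructions themselves are only Observe()d, never moved.
    // Illustrative note.)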
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }

  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: If the callee saves and restores these, then we can potentially
  // use them between the save and the restore. To do that, we could scan
  // the exit blocks to see which of these registers are defined.
  // Alternatively, callee-saved registers that aren't saved and restored
  // could be marked live-in in every block.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DOUT << "********** List Scheduling **********\n";

  // Build the scheduling graph.
  BuildSchedGraph();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph();
    }
  }

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
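      // (E.g., if scheduling moved a def of Reg later within that region,
      // assuming the def landed at the region's very end is the conservative
      // choice for the remaining upward scan. Illustrative note.)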
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
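    // (This tie-break matters for BreakAntiDependencies below: an
    // anti-dependence edge on the critical path is one the pass can
    // actually try to remove by renaming.)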
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC =
      getInstrOperandRegClass(TRI, MI->getDesc(), i);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));
  }
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are def'd but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC =
      getInstrOperandRegClass(TRI, MI->getDesc(), i);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live, but now it is: this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DOUT << "Critical path has total latency "
       << (Max->getDepth() + Max->Latency) << "\n";

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
    // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
    // is left behind appearing to clobber the super-register, while the
    // subregister needs to remain live. So we just ignore them.
    if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
      continue;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          // Don't break anti-dependencies on non-allocatable registers.
          if (!AllocatableSet.test(AntiDepReg))
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    // If this instruction has a use of AntiDepReg, breaking it
    // is invalid.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (MO.isUse() && AntiDepReg == Reg) {
        AntiDepReg = 0;
        break;
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
           RE = RC->allocation_order_end(MF); R != RE; ++R) {
        unsigned NewReg = *R;
        // Don't replace a register with itself.
        if (NewReg == AntiDepReg) continue;
        // Don't replace a register with one that was recently used to repair
        // an anti-dependence with this AntiDepReg, because that would
        // re-introduce that anti-dependence.
        if (NewReg == LastNewReg[AntiDepReg]) continue;
        // If NewReg is dead and NewReg's most recent def is not before
        // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
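        // (Illustrative: proceeding bottom-up, if AntiDepReg's live range is
        // killed at index 12 and NewReg's next def below us is at index 14,
        // then 12 <= 14 holds and the renamed range cannot overlap NewReg's
        // existing live range.)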
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");
        if (KillIndices[NewReg] == ~0u &&
            Classes[NewReg] != reinterpret_cast<TargetRegisterClass *>(-1) &&
            KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
          DOUT << "Breaking anti-dependence edge on "
               << TRI->getName(AntiDepReg)
               << " with " << RegRefs.count(AntiDepReg) << " references"
               << " using " << TRI->getName(NewReg) << "!\n";

          // Update the references to the old register to refer to the new
          // register.
          std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                    std::multimap<unsigned, MachineOperand *>::iterator>
             Range = RegRefs.equal_range(AntiDepReg);
          for (std::multimap<unsigned, MachineOperand *>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
            Q->second->setReg(NewReg);

          // We just went back in time and modified history; the
          // liveness information for the anti-dependence reg is now
          // inconsistent. Set the state as if it were dead.
          Classes[NewReg] = Classes[AntiDepReg];
          DefIndices[NewReg] = DefIndices[AntiDepReg];
          KillIndices[NewReg] = KillIndices[AntiDepReg];
          assert(((KillIndices[NewReg] == ~0u) !=
                  (DefIndices[NewReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for NewReg!");

          Classes[AntiDepReg] = 0;
          DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
          KillIndices[AntiDepReg] = ~0u;
          assert(((KillIndices[AntiDepReg] == ~0u) !=
                  (DefIndices[AntiDepReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for AntiDepReg!");

          RegRefs.erase(AntiDepReg);
          Changed = true;
          LastNewReg[AntiDepReg] = NewReg;
          break;
        }
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

//===----------------------------------------------------------------------===//
// Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    cerr << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    cerr << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DOUT << "*** Scheduling [" << CurCycle << "]: ";
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    // If there are no instructions available, don't try to issue anything, and
    // don't advance the hazard recognizer.
    if (AvailableQueue.empty()) {
      CurCycle = MinDepth != ~0u ? MinDepth : CurCycle + 1;
      continue;
    }

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);

      // If this is a pseudo-op node, we don't want to increment the current
      // cycle.
      if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
        ++CurCycle;
    } else if (!HasNoopHazards) {
      // Otherwise we have a pipeline stall, but no other problem; just
      // advance the current cycle and try again.
      DOUT << "*** Advancing cycle, no work to do\n";
      HazardRec->AdvanceCycle();
      ++NumStalls;
      ++CurCycle;
    } else {
      // Otherwise, we have no instructions to issue and we have instructions
      // that will fault if we don't do this right. This is the case for
      // processors without pipeline interlocks and other cases.
      DOUT << "*** Emitting noop\n";
      HazardRec->EmitNoop();
      Sequence.push_back(0);   // NULL here means noop
      ++NumNoops;
      ++CurCycle;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
// Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}
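
// (Usage note, not from the original sources: this pass is typically created
// via createPostRAScheduler() by LLVMTargetMachine's codegen pass setup when
// post-RA scheduling is enabled, rather than being added to a pass manager
// by hand.)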