//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>

using namespace llvm;
STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);
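// Note: when hazard avoidance is enabled, runOnMachineFunction (below)
// creates an itinerary-driven ExactHazardRecognizer; otherwise it falls
// back to the conservative SimpleHazardRecognizer.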

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
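// For example, -postra-sched-debugdiv=2 -postra-sched-debugmod=1 schedules
// only every other block (based on a running block counter), which helps
// bisect a post-RA scheduling problem down to a single basic block.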

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
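    // Invariant: at any point in the bottom-up scan, exactly one of
    // KillIndices[Reg] and DefIndices[Reg] is ~0u for each register: a live
    // register has a known kill index, a dead one a known def index. The
    // asserts in ScanInstruction and findSuitableFreeRegister rely on this.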

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *);
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  // Check that post-RA scheduling is enabled for this function.
  const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
  if (!ST.enablePostRAScheduler())
    return false;

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
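    // A boundary instruction is not itself scheduled; it is passed to
    // Observe() so the liveness tracking stays consistent, and a new
    // scheduling region begins above it.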
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());
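  // (All registers start out dead; a def index of BB->size(), one past the
  // last instruction, means the register is defined below everything the
  // bottom-up scan will visit.)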

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }

  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: there is a new method
  // MachineFrameInfo::getPristineRegs(MBB). It gives you a list of
  // CSRs that have not been saved when entering the MBB. The
  // remaining CSRs have been saved and can be treated like call
  // clobbered registers.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph();
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
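
/// PrescanInstruction - Record register-class and reference information for
/// MI's register operands, before ScanInstruction updates the liveness
/// indices. A register seen in more than one class, or aliasing another
/// live register, is marked non-renamable with the -1 sentinel.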
void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));
  }
}
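
/// ScanInstruction - Update the liveness indices, Classes, and RegRefs to
/// account for MI in the bottom-up scan: defs end live ranges (the register
/// becomes dead above the def), and uses that weren't already live begin
/// them (the use is the kill).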
void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defined but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}
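
/// findSuitableFreeRegister - Pick a register from RC that can safely
/// replace AntiDepReg over its current live range, or return 0 if none
/// is available.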
unsigned
SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
                                               unsigned LastNewReg,
                                               const TargetRegisterClass *RC) {
  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
       RE = RC->allocation_order_end(MF); R != RE; ++R) {
    unsigned NewReg = *R;
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) !=
            (DefIndices[AntiDepReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DEBUG(errs() << "Critical path has total latency "
        << (Max->getDepth() + Max->Latency) << "\n");

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // After regalloc, KILL instructions aren't safe to treat as
    // dependence-breaking. In the case of an INSERT_SUBREG, the KILL
    // is left behind appearing to clobber the super-register, while the
    // subregister needs to remain live. So we just ignore them.
    if (MI->getOpcode() == TargetInstrInfo::KILL)
      continue;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          // Don't break anti-dependencies on non-allocatable registers.
          if (!AllocatableSet.test(AntiDepReg))
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
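            //
            // (For example, a data edge on AntiDepReg from another SUnit
            // means its value is genuinely read here, so renaming the def
            // would not be safe.)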
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    // If this instruction has a use of AntiDepReg, breaking it
    // is invalid.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (MO.isUse() && AntiDepReg == Reg) {
        AntiDepReg = 0;
        break;
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
           Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        Changed = true;
        LastNewReg[AntiDepReg] = NewReg;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  MO.setIsKill(AllDead);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defined but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // All leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
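        // The back element was swapped into slot i, so re-examine slot i on
        // the next iteration rather than skipping it.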
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//  Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}