//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include <map>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);

static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable exact hazard avoidance"),
                      cl::init(false), cl::Hidden);
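
// An anti-dependence (write-after-read) arises when an instruction must wait
// to redefine a register that an earlier instruction still reads. A minimal
// sketch (register names hypothetical):
//   R1 = R0 + 1      // reads R0
//   R0 = R2 + R3     // must stay below the read of R0
// Renaming the second def to a free register, say R4, removes the edge and
// lets the scheduler reorder the two instructions.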

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
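
    // Invariant (relied on by the asserts below): at any point in the
    // bottom-up walk, exactly one of KillIndices[Reg] and DefIndices[Reg]
    // is ~0u. E.g. a register whose last use (bottom-up kill) was seen at
    // index 5 and which hasn't yet reached its def has KillIndices[Reg] == 5
    // and DefIndices[Reg] == ~0u.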

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
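
// For example, a call sequence's stack-pointer adjustment modifies the stack
// pointer and is therefore treated as a boundary above, so instructions are
// never scheduled across it.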

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule();
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule();

    // Clean up register live-range state.
    Scheduler.FinishBlock();
  }

  return true;
}
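
// Region walk sketch: for a block "a; b; LABEL; c; d", the bottom-up loop
// above schedules [c, d] as one region when it reaches LABEL, observes LABEL
// itself without scheduling it, and finally schedules [a, b] as a second
// region once the loop falls off the top of the block.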

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn())
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  else
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }

  // Consider callee-saved registers as live-out, since we're running after
  // prologue/epilogue insertion so there's no way to add additional
  // saved registers.
  //
  // TODO: If the callee saves and restores these, then we can potentially
  // use them between the save and the restore. To do that, we could scan
  // the exit blocks to see which of these registers are defined.
  // Alternatively, callee-saved registers that aren't saved and restored
  // could be marked live-in in every block.
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph();
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
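
// E.g. with two predecessor edges, one from a node at depth 3 with latency 1
// (total 4) and one from a node at depth 2 with latency 3 (total 5), the
// second edge is chosen; on a tie, an anti-dependence edge wins so that it
// can be considered for breaking.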

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));
  }
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defined but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}
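
// Bottom-up liveness example: scanning "R0 = R1 + R2" at index Count marks
// R0 dead above this point (DefIndices[R0] = Count, KillIndices[R0] = ~0u),
// and marks R1 and R2 live with their kills at Count if they weren't
// already live below.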

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DEBUG(errs() << "Critical path has total latency "
        << (Max->getDepth() + Max->Latency) << "\n");

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // After regalloc, IMPLICIT_DEF instructions aren't safe to treat as
    // dependence-breaking. In the case of an INSERT_SUBREG, the IMPLICIT_DEF
    // is left behind appearing to clobber the super-register, while the
    // subregister needs to remain live. So we just ignore them.
    if (MI->getOpcode() == TargetInstrInfo::IMPLICIT_DEF)
      continue;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          // Don't break anti-dependencies on non-allocatable registers.
          if (!AllocatableSet.test(AntiDepReg))
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    // If this instruction has a use of AntiDepReg, breaking it
    // is invalid.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (MO.isUse() && AntiDepReg == Reg) {
        AntiDepReg = 0;
        break;
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
           RE = RC->allocation_order_end(MF); R != RE; ++R) {
        unsigned NewReg = *R;
        // Don't replace a register with itself.
        if (NewReg == AntiDepReg) continue;
        // Don't replace a register with one that was recently used to repair
        // an anti-dependence with this AntiDepReg, because that would
        // re-introduce that anti-dependence.
        if (NewReg == LastNewReg[AntiDepReg]) continue;
        // If NewReg is dead and NewReg's most recent def is not before
        // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");
        if (KillIndices[NewReg] == ~0u &&
            Classes[NewReg] != reinterpret_cast<TargetRegisterClass *>(-1) &&
            KillIndices[AntiDepReg] <= DefIndices[NewReg]) {
          DEBUG(errs() << "Breaking anti-dependence edge on "
                << TRI->getName(AntiDepReg)
                << " with " << RegRefs.count(AntiDepReg) << " references"
                << " using " << TRI->getName(NewReg) << "!\n");

          // Update the references to the old register to refer to the new
          // register.
          std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                    std::multimap<unsigned, MachineOperand *>::iterator>
             Range = RegRefs.equal_range(AntiDepReg);
          for (std::multimap<unsigned, MachineOperand *>::iterator
               Q = Range.first, QE = Range.second; Q != QE; ++Q)
            Q->second->setReg(NewReg);

          // We just went back in time and modified history; the
          // liveness information for the anti-dependence reg is now
          // inconsistent. Set the state as if it were dead.
          Classes[NewReg] = Classes[AntiDepReg];
          DefIndices[NewReg] = DefIndices[AntiDepReg];
          KillIndices[NewReg] = KillIndices[AntiDepReg];
          assert(((KillIndices[NewReg] == ~0u) !=
                  (DefIndices[NewReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for NewReg!");

          Classes[AntiDepReg] = 0;
          DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
          KillIndices[AntiDepReg] = ~0u;
          assert(((KillIndices[AntiDepReg] == ~0u) !=
                  (DefIndices[AntiDepReg] == ~0u)) &&
                 "Kill and Def maps aren't consistent for AntiDepReg!");

          RegRefs.erase(AntiDepReg);
          Changed = true;
          LastNewReg[AntiDepReg] = NewReg;
          break;
        }
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}
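
// Renaming sketch: given "R0 = R1 + 1; ... = R0" anti-dependent on an earlier
// use of R0, every reference recorded in RegRefs for R0's live range is
// rewritten to a free register (hypothetically R2), yielding
// "R2 = R1 + 1; ... = R2" and dissolving the anti-dependence edge.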

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();
  --SuccSU->NumPredsLeft;

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft < 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready, put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else if (!HasNoopHazards) {
      // Otherwise, we have a pipeline stall, but no other problem, just advance
      // the current cycle and try again.
      DEBUG(errs() << "*** Advancing cycle, no work to do\n");
      HazardRec->AdvanceCycle();
      ++NumStalls;
      ++CurCycle;
    } else {
      // Otherwise, we have no instructions to issue and we have instructions
      // that will fault if we don't do this right. This is the case for
      // processors without pipeline interlocks and other cases.
      DEBUG(errs() << "*** Emitting noop\n");
      HazardRec->EmitNoop();
      Sequence.push_back(0);   // NULL here means noop
      ++NumNoops;
      ++CurCycle;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//  Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}