//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
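
// The scheduling logic proper lives in the SchedulePostRATDList class below;
// PostRAScheduler is the thin MachineFunctionPass wrapper that drives it
// over each scheduling region of every basic block.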

#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
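
// Note that an explicit command-line setting overrides the subtarget: for
// example, "llc -post-RA-scheduler=false" disables the pass even on targets
// whose TargetSubtarget requests it (see the getPosition() check in
// runOnMachineFunction below).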
static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable exact hazard avoidance"),
                      cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
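
// For example, -postra-sched-debugdiv=2 -postra-sched-debugmod=1 restricts
// scheduling to every other block visited, which is handy for bisecting a
// miscompile down to a single block.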

namespace {
  class VISIBILITY_HIDDEN PostRAScheduler : public MachineFunctionPass {
  public:
    static char ID;
    PostRAScheduler() : MachineFunctionPass(&ID) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class VISIBILITY_HIDDEN SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
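
    /// Invariant: for each register, exactly one of KillIndices[Reg] and
    /// DefIndices[Reg] is ~0u at any point in the bottom-up scan; the
    /// "Kill and Def maps aren't consistent" asserts below check this.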

    /// KeepRegs - A set of registers which are live and cannot be changed to
    /// break anti-dependencies.
    SmallSet<unsigned, 4> KeepRegs;

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *);
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  // Check for explicit enable/disable of post-ra scheduling.
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this function.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler())
      return false;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
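  // The exact recognizer walks the target's instruction itineraries cycle by
  // cycle; the simple recognizer is a cheaper approximation (see
  // ExactHazardRecognizer.h and SimpleHazardRecognizer.h).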
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR);

  // Loop over all of the basic blocks.
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Clear the "do not change" set.
  KeepRegs.clear();

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  // Also mark as live-out any callee-saved registers that were not
  // saved in the prolog.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    if (!Pristine.test(Reg)) continue;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph();

  if (EnableAntiDepBreaking) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph();
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();
  KeepRegs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}

void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));

    // It's not safe to change register allocation for source operands of
    // instructions that have special allocation requirements.
    if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
      if (KeepRegs.insert(Reg)) {
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KeepRegs.insert(*Subreg);
      }
    }
  }
}

void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are defed but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    KeepRegs.erase(Reg);
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      KeepRegs.erase(SubregReg);
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}

unsigned
SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
                                               unsigned LastNewReg,
                                               const TargetRegisterClass *RC) {
  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
       RE = RC->allocation_order_end(MF); R != RE; ++R) {
    unsigned NewReg = *R;
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) != (DefIndices[AntiDepReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) != (DefIndices[NewReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}

/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

  DEBUG(errs() << "Critical path has total latency "
        << (Max->getDepth() + Max->Latency) << "\n");

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};

  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!AllocatableSet.test(AntiDepReg))
            // Don't break anti-dependencies on non-allocatable registers.
            AntiDepReg = 0;
          else if (KeepRegs.count(AntiDepReg))
            // Don't break anti-dependencies if a use down below requires
            // this exact register.
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }

    PrescanInstruction(MI);

    if (MI->getDesc().hasExtraDefRegAllocReq())
      // If this instruction's defs have special allocation requirement, don't
      // break this anti-dependency.
      AntiDepReg = 0;
    else if (AntiDepReg) {
      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg()) continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0) continue;
        if (MO.isUse() && AntiDepReg == Reg) {
          AntiDepReg = 0;
          break;
        }
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");

        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
           Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        Changed = true;
        LastNewReg[AntiDepReg] = NewReg;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}

/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  MO.setIsKill(AllDead);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = ~0u;
      }
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to the Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While the Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler() {
  return new PostRAScheduler();
}
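
// The target-independent code generator (e.g. LLVMTargetMachine) calls
// createPostRAScheduler() to add this pass to the pipeline after register
// allocation.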