//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "post-RA-sched"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/Statistic.h"
#include <map>
#include <set>
using namespace llvm;
STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<bool>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies"),
                      cl::init(true), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                      cl::desc("Enable exact hazard avoidance"),
                      cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
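// For example, passing -postra-sched-debugdiv=4 -postra-sched-debugmod=1
// restricts post-RA scheduling to every block whose running count is
// congruent to 1 mod 4, which is handy for bisecting a miscompile down to
// a single block.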
namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;
  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// AllocatableSet - The set of allocatable registers.
    /// We'll be ignoring anti-dependencies on non-allocatable registers,
    /// because they may not be safe to break.
    const BitVector AllocatableSet;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// AntiDepMode - Anti-dependence breaking mode.
    TargetSubtarget::AntiDepBreakMode AntiDepMode;

    /// Classes - For live regs that are only used in one register class in a
    /// live range, the register class. If the register is not live, the
    /// corresponding value is null. If the register is live but used in
    /// multiple register classes, the corresponding value is -1 casted to a
    /// TargetRegisterClass pointer.
    const TargetRegisterClass *
      Classes[TargetRegisterInfo::FirstVirtualRegister];

    /// RegRefs - Map registers to all their references within a live range.
    std::multimap<unsigned, MachineOperand *> RegRefs;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

    /// DefIndices - The index of the most recent complete def (proceeding
    /// bottom-up), or ~0u if the register is live.
    unsigned DefIndices[TargetRegisterInfo::FirstVirtualRegister];
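    // Invariant: for every physical register, exactly one of KillIndices[Reg]
    // and DefIndices[Reg] is ~0u. A live register has a valid kill index and
    // DefIndices[Reg] == ~0u; a dead one has a valid def index and
    // KillIndices[Reg] == ~0u. The asserts throughout this file rely on this.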
    /// KeepRegs - A set of registers which are live and cannot be changed to
    /// break anti-dependencies.
    SmallSet<unsigned, 4> KeepRegs;

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AliasAnalysis *aa,
                         TargetSubtarget::AntiDepBreakMode adm)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        AllocatableSet(TRI->getAllocatableSet(MF)),
        HazardRec(HR), AA(aa), AntiDepMode(adm) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

  private:
    void PrescanInstruction(MachineInstr *MI);
    void ScanInstruction(MachineInstr *MI, unsigned Count);
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge);
    void ReleaseSuccessors(SUnit *SU);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle);
    void ListScheduleTopDown();
    bool BreakAntiDependencies();
    unsigned findSuitableFreeRegister(unsigned AntiDepReg,
                                      unsigned LastNewReg,
                                      const TargetRegisterClass *);
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}
/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}
bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking) ?
      TargetSubtarget::ANTIDEP_CRITICAL : TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, AA, AntiDepMode);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":MBB ID#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills
    Scheduler.FixupKills(MBB);
  }

  return true;
}
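// Note that each EmitSchedule call above rewrites one scheduling region in
// place; boundary instructions are never moved, only Observe'd, so the
// scheduler's liveness state stays current across region breaks.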
/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer.
  HazardRec->Reset();

  // Clear out the register class data.
  std::fill(Classes, array_endof(Classes),
            static_cast<const TargetRegisterClass *>(0));

  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);
  std::fill(DefIndices, array_endof(DefIndices), BB->size());

  // Clear "do not change" set.
  KeepRegs.clear();

  bool IsReturnBlock = (!BB->empty() && BB->back().getDesc().isReturn());

  // Determine the live-out physregs for this block.
  if (IsReturnBlock) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[Reg] = BB->size();
      DefIndices[Reg] = ~0u;
      // Repeat, for all aliases.
      for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
        unsigned AliasReg = *Alias;
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[AliasReg] = BB->size();
        DefIndices[AliasReg] = ~0u;
      }
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI)
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
        KillIndices[Reg] = BB->size();
        DefIndices[Reg] = ~0u;
        // Repeat, for all aliases.
        for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
          unsigned AliasReg = *Alias;
          Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
          KillIndices[AliasReg] = BB->size();
          DefIndices[AliasReg] = ~0u;
        }
      }
  }

  // Mark live-out callee-saved registers. In a return block this is
  // all callee-saved registers. In non-return this is any
  // callee-saved register that is not saved in the prolog.
  const MachineFrameInfo *MFI = MF.getFrameInfo();
  BitVector Pristine = MFI->getPristineRegs(BB);
  for (const unsigned *I = TRI->getCalleeSavedRegs(); *I; ++I) {
    unsigned Reg = *I;
    if (!IsReturnBlock && !Pristine.test(Reg)) continue;
    Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
    KillIndices[Reg] = BB->size();
    DefIndices[Reg] = ~0u;
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
      KillIndices[AliasReg] = BB->size();
      DefIndices[AliasReg] = ~0u;
    }
  }
}
/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  DEBUG(errs() << "********** List Scheduling **********\n");

  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepMode != TargetSubtarget::ANTIDEP_NONE) {
    if (BreakAntiDependencies()) {
      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      SUnits.clear();
      Sequence.clear();
      EntrySU = SUnit();
      ExitSU = SUnit();
      BuildSchedGraph(AA);
    }
  }

  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);

  ListScheduleTopDown();

  AvailableQueue.releaseState();
}
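// Note: rather than incrementally patching the anti- and output-dependence
// edges invalidated by renaming, Schedule() conservatively rebuilds the
// whole graph, as the comment above describes.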
/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  assert(Count < InsertPosIndex && "Instruction index out of expected range!");

  // Any register which was defined within the previous scheduling region
  // may have been rescheduled and its lifetime may overlap with registers
  // in ways not reflected in our current liveness state. For each such
  // register, adjust the liveness state to be conservatively correct.
  for (unsigned Reg = 0; Reg != TargetRegisterInfo::FirstVirtualRegister; ++Reg)
    if (DefIndices[Reg] < InsertPosIndex && DefIndices[Reg] >= Count) {
      assert(KillIndices[Reg] == ~0u && "Clobbered register is live!");
      // Mark this register to be non-renamable.
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      // Move the def index to the end of the previous region, to reflect
      // that the def could theoretically have been scheduled at the end.
      DefIndices[Reg] = InsertPosIndex;
    }

  PrescanInstruction(MI);
  ScanInstruction(MI, Count);
}
/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  RegRefs.clear();
  KeepRegs.clear();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}
/// CriticalPathStep - Return the next SUnit after SU on the bottom-up
/// critical path.
static SDep *CriticalPathStep(SUnit *SU) {
  SDep *Next = 0;
  unsigned NextDepth = 0;
  // Find the predecessor edge with the greatest depth.
  for (SUnit::pred_iterator P = SU->Preds.begin(), PE = SU->Preds.end();
       P != PE; ++P) {
    SUnit *PredSU = P->getSUnit();
    unsigned PredLatency = P->getLatency();
    unsigned PredTotalLatency = PredSU->getDepth() + PredLatency;
    // In the case of a latency tie, prefer an anti-dependency edge over
    // other types of edges.
    if (NextDepth < PredTotalLatency ||
        (NextDepth == PredTotalLatency && P->getKind() == SDep::Anti)) {
      NextDepth = PredTotalLatency;
      Next = &*P;
    }
  }
  return Next;
}
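// For example, if SU has two predecessor edges that tie at the same total
// depth, one a data edge and one an anti-dependence edge, the anti edge is
// returned: ties favor edges that BreakAntiDependencies can actually remove.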
void SchedulePostRATDList::PrescanInstruction(MachineInstr *MI) {
  // Scan the register operands for this instruction and update
  // Classes and RegRefs.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    const TargetRegisterClass *NewRC = 0;

    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    // Now check for aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      // If an alias of the reg is used during the live range, give up.
      // Note that this allows us to skip checking if AntiDepReg
      // overlaps with any of the aliases, among other things.
      unsigned AliasReg = *Alias;
      if (Classes[AliasReg]) {
        Classes[AliasReg] = reinterpret_cast<TargetRegisterClass *>(-1);
        Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);
      }
    }

    // If we're still willing to consider this register, note the reference.
    if (Classes[Reg] != reinterpret_cast<TargetRegisterClass *>(-1))
      RegRefs.insert(std::make_pair(Reg, &MO));

    // It's not safe to change register allocation for source operands of
    // instructions that have special allocation requirements.
    if (MO.isUse() && MI->getDesc().hasExtraSrcRegAllocReq()) {
      if (KeepRegs.insert(Reg)) {
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KeepRegs.insert(*Subreg);
      }
    }
  }
}
void SchedulePostRATDList::ScanInstruction(MachineInstr *MI,
                                           unsigned Count) {
  // Update liveness.
  // Proceeding upwards, registers that are def'd but not used in this
  // instruction are now dead.
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isDef()) continue;
    // Ignore two-addr defs.
    if (MI->isRegTiedToUseOperand(i)) continue;

    DefIndices[Reg] = Count;
    KillIndices[Reg] = ~0u;
    assert(((KillIndices[Reg] == ~0u) !=
            (DefIndices[Reg] == ~0u)) &&
           "Kill and Def maps aren't consistent for Reg!");
    KeepRegs.erase(Reg);
    Classes[Reg] = 0;
    RegRefs.erase(Reg);
    // Repeat, for all subregs.
    for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
         *Subreg; ++Subreg) {
      unsigned SubregReg = *Subreg;
      DefIndices[SubregReg] = Count;
      KillIndices[SubregReg] = ~0u;
      KeepRegs.erase(SubregReg);
      Classes[SubregReg] = 0;
      RegRefs.erase(SubregReg);
    }
    // Conservatively mark super-registers as unusable.
    for (const unsigned *Super = TRI->getSuperRegisters(Reg);
         *Super; ++Super) {
      unsigned SuperReg = *Super;
      Classes[SuperReg] = reinterpret_cast<TargetRegisterClass *>(-1);
    }
  }
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg()) continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0) continue;
    if (!MO.isUse()) continue;

    const TargetRegisterClass *NewRC = 0;
    if (i < MI->getDesc().getNumOperands())
      NewRC = MI->getDesc().OpInfo[i].getRegClass(TRI);

    // For now, only allow the register to be changed if its register
    // class is consistent across all uses.
    if (!Classes[Reg] && NewRC)
      Classes[Reg] = NewRC;
    else if (!NewRC || Classes[Reg] != NewRC)
      Classes[Reg] = reinterpret_cast<TargetRegisterClass *>(-1);

    RegRefs.insert(std::make_pair(Reg, &MO));

    // It wasn't previously live but now it is; this is a kill.
    if (KillIndices[Reg] == ~0u) {
      KillIndices[Reg] = Count;
      DefIndices[Reg] = ~0u;
      assert(((KillIndices[Reg] == ~0u) !=
              (DefIndices[Reg] == ~0u)) &&
             "Kill and Def maps aren't consistent for Reg!");
    }
    // Repeat, for all aliases.
    for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
      unsigned AliasReg = *Alias;
      if (KillIndices[AliasReg] == ~0u) {
        KillIndices[AliasReg] = Count;
        DefIndices[AliasReg] = ~0u;
      }
    }
  }
}
unsigned
SchedulePostRATDList::findSuitableFreeRegister(unsigned AntiDepReg,
                                               unsigned LastNewReg,
                                               const TargetRegisterClass *RC) {
  for (TargetRegisterClass::iterator R = RC->allocation_order_begin(MF),
       RE = RC->allocation_order_end(MF); R != RE; ++R) {
    unsigned NewReg = *R;
    // Don't replace a register with itself.
    if (NewReg == AntiDepReg) continue;
    // Don't replace a register with one that was recently used to repair
    // an anti-dependence with this AntiDepReg, because that would
    // re-introduce that anti-dependence.
    if (NewReg == LastNewReg) continue;
    // If NewReg is dead and NewReg's most recent def is not before
    // AntiDepReg's kill, it's safe to replace AntiDepReg with NewReg.
    assert(((KillIndices[AntiDepReg] == ~0u) !=
            (DefIndices[AntiDepReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for AntiDepReg!");
    assert(((KillIndices[NewReg] == ~0u) !=
            (DefIndices[NewReg] == ~0u)) &&
           "Kill and Def maps aren't consistent for NewReg!");
    if (KillIndices[NewReg] != ~0u ||
        Classes[NewReg] == reinterpret_cast<TargetRegisterClass *>(-1) ||
        KillIndices[AntiDepReg] > DefIndices[NewReg])
      continue;
    return NewReg;
  }

  // No registers are free and available!
  return 0;
}
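// Note the three rejection tests above: NewReg must be dead at the current
// point of the bottom-up scan, must be renamable (Classes[NewReg] != -1),
// and its most recent def must not precede AntiDepReg's kill in program
// order, since the two live ranges would then overlap.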
/// BreakAntiDependencies - Identify anti-dependencies along the critical path
/// of the ScheduleDAG and break them by renaming registers.
///
bool SchedulePostRATDList::BreakAntiDependencies() {
  // The code below assumes that there is at least one instruction,
  // so just duck out immediately if the block is empty.
  if (SUnits.empty()) return false;

  // Find the node at the bottom of the critical path.
  SUnit *Max = 0;
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    SUnit *SU = &SUnits[i];
    if (!Max || SU->getDepth() + SU->Latency > Max->getDepth() + Max->Latency)
      Max = SU;
  }

#ifndef NDEBUG
  {
    DEBUG(errs() << "Critical path has total latency "
          << (Max->getDepth() + Max->Latency) << "\n");
    DEBUG(errs() << "Available regs:");
    for (unsigned Reg = 0; Reg < TRI->getNumRegs(); ++Reg) {
      if (KillIndices[Reg] == ~0u)
        DEBUG(errs() << " " << TRI->getName(Reg));
    }
    DEBUG(errs() << '\n');
  }
#endif

  // Track progress along the critical path through the SUnit graph as we walk
  // the instructions.
  SUnit *CriticalPathSU = Max;
  MachineInstr *CriticalPathMI = CriticalPathSU->getInstr();

  // Consider this pattern:
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  //   A = ...
  //   ... = A
  // There are three anti-dependencies here, and without special care,
  // we'd break all of them using the same register:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  //   B = ...
  //   ... = B
  // because at each anti-dependence, B is the first register that
  // isn't A which is free. This re-introduces anti-dependencies
  // at all but one of the original anti-dependencies that we were
  // trying to break. To avoid this, keep track of the most recent
  // register that each register was replaced with, and avoid
  // using it to repair an anti-dependence on the same register.
  // This lets us produce this:
  //   A = ...
  //   ... = A
  //   B = ...
  //   ... = B
  //   C = ...
  //   ... = C
  //   B = ...
  //   ... = B
  // This still has an anti-dependence on B, but at least it isn't on the
  // original critical path.
  //
  // TODO: If we tracked more than one register here, we could potentially
  // fix that remaining critical edge too. This is a little more involved,
  // because unlike the most recent register, less recent registers should
  // still be considered, though only if no other registers are available.
  unsigned LastNewReg[TargetRegisterInfo::FirstVirtualRegister] = {};
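  // LastNewReg[R] records the register most recently chosen to replace R;
  // findSuitableFreeRegister skips that register when breaking another
  // anti-dependence on R, implementing the history described above.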
  // Attempt to break anti-dependence edges on the critical path. Walk the
  // instructions from the bottom up, tracking information about liveness
  // as we go to help determine which registers are available.
  bool Changed = false;
  unsigned Count = InsertPosIndex - 1;
  for (MachineBasicBlock::iterator I = InsertPos, E = Begin;
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Check if this instruction has a dependence on the critical path that
    // is an anti-dependence that we may be able to break. If it is, set
    // AntiDepReg to the non-zero register associated with the anti-dependence.
    //
    // We limit our attention to the critical path as a heuristic to avoid
    // breaking anti-dependence edges that aren't going to significantly
    // impact the overall schedule. There are a limited number of registers
    // and we want to save them for the important edges.
    //
    // TODO: Instructions with multiple defs could have multiple
    // anti-dependencies. The current code here only knows how to break one
    // edge per instruction. Note that we'd have to be able to break all of
    // the anti-dependencies in an instruction in order to be effective.
    unsigned AntiDepReg = 0;
    if (MI == CriticalPathMI) {
      if (SDep *Edge = CriticalPathStep(CriticalPathSU)) {
        SUnit *NextSU = Edge->getSUnit();

        // Only consider anti-dependence edges.
        if (Edge->getKind() == SDep::Anti) {
          AntiDepReg = Edge->getReg();
          assert(AntiDepReg != 0 && "Anti-dependence on reg0?");
          if (!AllocatableSet.test(AntiDepReg))
            // Don't break anti-dependencies on non-allocatable registers.
            AntiDepReg = 0;
          else if (KeepRegs.count(AntiDepReg))
            // Don't break anti-dependencies if a use down below requires
            // this exact register.
            AntiDepReg = 0;
          else {
            // If the SUnit has other dependencies on the SUnit that it
            // anti-depends on, don't bother breaking the anti-dependency
            // since those edges would prevent such units from being
            // scheduled past each other regardless.
            //
            // Also, if there are dependencies on other SUnits with the
            // same register as the anti-dependency, don't attempt to
            // break it.
            for (SUnit::pred_iterator P = CriticalPathSU->Preds.begin(),
                 PE = CriticalPathSU->Preds.end(); P != PE; ++P)
              if (P->getSUnit() == NextSU ?
                    (P->getKind() != SDep::Anti || P->getReg() != AntiDepReg) :
                    (P->getKind() == SDep::Data && P->getReg() == AntiDepReg)) {
                AntiDepReg = 0;
                break;
              }
          }
        }
        CriticalPathSU = NextSU;
        CriticalPathMI = CriticalPathSU->getInstr();
      } else {
        // We've reached the end of the critical path.
        CriticalPathSU = 0;
        CriticalPathMI = 0;
      }
    }
    PrescanInstruction(MI);

    if (MI->getDesc().hasExtraDefRegAllocReq())
      // If this instruction's defs have special allocation requirement, don't
      // break this anti-dependency.
      AntiDepReg = 0;
    else if (AntiDepReg) {
      // If this instruction has a use of AntiDepReg, breaking it
      // is invalid.
      for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI->getOperand(i);
        if (!MO.isReg()) continue;
        unsigned Reg = MO.getReg();
        if (Reg == 0) continue;
        if (MO.isUse() && AntiDepReg == Reg) {
          AntiDepReg = 0;
          break;
        }
      }
    }

    // Determine AntiDepReg's register class, if it is live and is
    // consistently used within a single class.
    const TargetRegisterClass *RC = AntiDepReg != 0 ? Classes[AntiDepReg] : 0;
    assert((AntiDepReg == 0 || RC != NULL) &&
           "Register should be live if it's causing an anti-dependence!");
    if (RC == reinterpret_cast<TargetRegisterClass *>(-1))
      AntiDepReg = 0;

    // Look for a suitable register to use to break the anti-dependence.
    //
    // TODO: Instead of picking the first free register, consider which might
    // be the best.
    if (AntiDepReg != 0) {
      if (unsigned NewReg = findSuitableFreeRegister(AntiDepReg,
                                                     LastNewReg[AntiDepReg],
                                                     RC)) {
        DEBUG(errs() << "Breaking anti-dependence edge on "
              << TRI->getName(AntiDepReg)
              << " with " << RegRefs.count(AntiDepReg) << " references"
              << " using " << TRI->getName(NewReg) << "!\n");
        // Update the references to the old register to refer to the new
        // register.
        std::pair<std::multimap<unsigned, MachineOperand *>::iterator,
                  std::multimap<unsigned, MachineOperand *>::iterator>
           Range = RegRefs.equal_range(AntiDepReg);
        for (std::multimap<unsigned, MachineOperand *>::iterator
             Q = Range.first, QE = Range.second; Q != QE; ++Q)
          Q->second->setReg(NewReg);

        // We just went back in time and modified history; the
        // liveness information for the anti-dependence reg is now
        // inconsistent. Set the state as if it were dead.
        Classes[NewReg] = Classes[AntiDepReg];
        DefIndices[NewReg] = DefIndices[AntiDepReg];
        KillIndices[NewReg] = KillIndices[AntiDepReg];
        assert(((KillIndices[NewReg] == ~0u) !=
                (DefIndices[NewReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for NewReg!");

        Classes[AntiDepReg] = 0;
        DefIndices[AntiDepReg] = KillIndices[AntiDepReg];
        KillIndices[AntiDepReg] = ~0u;
        assert(((KillIndices[AntiDepReg] == ~0u) !=
                (DefIndices[AntiDepReg] == ~0u)) &&
               "Kill and Def maps aren't consistent for AntiDepReg!");

        RegRefs.erase(AntiDepReg);
        Changed = true;
        LastNewReg[AntiDepReg] = NewReg;
      }
    }

    ScanInstruction(MI, Count);
  }

  return Changed;
}
/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
         E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = BB->size();
    }
  } else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
         SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
           E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg)
          KillIndices[*Subreg] = BB->size();
      }
    }
  }
}
bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg,
                                               true  /*IsDef*/,
                                               true  /*IsImp*/,
                                               false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}
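// For example, if a super-register operand carries a kill flag but one of
// its subregisters is still live, the code above clears the flag and
// appends an implicit def of each live subregister to MI instead.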
/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are def'd but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = ~0u;
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}
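// A kill flag belongs only on the last use of a register, so reordering can
// leave it on the wrong instruction. Because the walk above is bottom-up,
// the first non-killed use seen for a register is the new last use, and the
// flag is toggled to match.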
//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}
/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I)
    ReleaseSucc(SU, &*I);
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle);

  ReleaseSuccessors(SU);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
  unsigned CurCycle = 0;

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU);

  // Add all leaves to Available queue.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    if (SUnits[i].Preds.empty()) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth() <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth() < MinDepth)
        MinDepth = PendingQueue[i]->getDepth();
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight() << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;

    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule, do it now.
    if (FoundSUnit) {
      ScheduleNodeTopDown(FoundSUnit, CurCycle);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}
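// Note the cycle accounting above: with the exact hazard recognizer
// (-avoid-hazards), CurCycle only advances when nothing can issue, so
// several instructions may land in one cycle; with the simple recognizer,
// CurCycle advances after every non-pseudo instruction.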
//===----------------------------------------------------------------------===//
//                         Public Constructor Functions
//===----------------------------------------------------------------------===//

FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}