//===----- SchedulePostRAList.cpp - list scheduler ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This implements a top-down list scheduler, using standard algorithms.
// The basic approach uses a priority queue of available nodes to schedule.
// One at a time, nodes are taken from the priority queue (thus in priority
// order), checked for legality to schedule, and emitted if legal.
//
// Nodes may not be legal to schedule either due to structural hazards (e.g.
// pipeline or resource constraints) or because an input to the instruction has
// not completed execution.
//
//===----------------------------------------------------------------------===//
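
// A rough sketch of the core loop implemented below (illustrative pseudocode
// only; the real loop also collects anti-dependence candidates, inserts
// noops, and supports multiple issue per cycle):
//
//   while (AvailableQueue or PendingQueue is non-empty):
//     move PendingQueue entries whose depth <= CurCycle to AvailableQueue
//     pop nodes from AvailableQueue until one has no hazard at CurCycle
//     if such a node was found: schedule it and release its successors
//     else: advance CurCycle, stalling or emitting a noop as the target needs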

#define DEBUG_TYPE "post-RA-sched"
#include "AntiDepBreaker.h"
#include "AggressiveAntiDepBreaker.h"
#include "CriticalAntiDepBreaker.h"
#include "ExactHazardRecognizer.h"
#include "SimpleHazardRecognizer.h"
#include "ScheduleDAGInstrs.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/LatencyPriorityQueue.h"
#include "llvm/CodeGen/SchedulerRegistry.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/ScheduleHazardRecognizer.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Target/TargetSubtarget.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"

using namespace llvm;

STATISTIC(NumNoops, "Number of noops inserted");
STATISTIC(NumStalls, "Number of pipeline stalls");
STATISTIC(NumFixedAnti, "Number of fixed anti-dependencies");

// Post-RA scheduling is enabled with
// TargetSubtarget.enablePostRAScheduler(). This flag can be used to
// override the target.
static cl::opt<bool>
EnablePostRAScheduler("post-RA-scheduler",
                      cl::desc("Enable scheduling after register allocation"),
                      cl::init(false), cl::Hidden);
static cl::opt<std::string>
EnableAntiDepBreaking("break-anti-dependencies",
                      cl::desc("Break post-RA scheduling anti-dependencies: "
                               "\"critical\", \"all\", or \"none\""),
                      cl::init("none"), cl::Hidden);
static cl::opt<bool>
EnablePostRAHazardAvoidance("avoid-hazards",
                            cl::desc("Enable exact hazard avoidance"),
                            cl::init(true), cl::Hidden);

// If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
static cl::opt<int>
DebugDiv("postra-sched-debugdiv",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
static cl::opt<int>
DebugMod("postra-sched-debugmod",
         cl::desc("Debug control MBBs that are scheduled"),
         cl::init(0), cl::Hidden);
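
// Example (illustrative): to force post-RA scheduling with critical-path
// anti-dependence breaking on a target that doesn't enable it by default,
// one would pass something like:
//   llc -post-RA-scheduler -break-anti-dependencies=critical foo.bc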

AntiDepBreaker::~AntiDepBreaker() { }

namespace {
  class PostRAScheduler : public MachineFunctionPass {
    AliasAnalysis *AA;
    CodeGenOpt::Level OptLevel;

  public:
    static char ID;
    PostRAScheduler(CodeGenOpt::Level ol) :
      MachineFunctionPass(&ID), OptLevel(ol) {}

    void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MachineDominatorTree>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    const char *getPassName() const {
      return "Post RA top-down list latency scheduler";
    }

    bool runOnMachineFunction(MachineFunction &Fn);
  };
  char PostRAScheduler::ID = 0;

  class SchedulePostRATDList : public ScheduleDAGInstrs {
    /// AvailableQueue - The priority queue to use for the available SUnits.
    ///
    LatencyPriorityQueue AvailableQueue;

    /// PendingQueue - This contains all of the instructions whose operands have
    /// been issued, but their results are not ready yet (due to the latency of
    /// the operation). Once the operands become available, the instruction is
    /// added to the AvailableQueue.
    std::vector<SUnit*> PendingQueue;
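    // For example (illustrative numbers): if a load issues at cycle 1 with
    // 3 cycles of latency, a user whose last predecessor is that load enters
    // PendingQueue immediately but only migrates to AvailableQueue once
    // CurCycle reaches its depth (cycle 4) in ListScheduleTopDown's main loop.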

    /// Topo - A topological ordering for SUnits.
    ScheduleDAGTopologicalSort Topo;

    /// HazardRec - The hazard recognizer to use.
    ScheduleHazardRecognizer *HazardRec;

    /// AntiDepBreak - Anti-dependence breaking object, or NULL if none.
    AntiDepBreaker *AntiDepBreak;

    /// AA - AliasAnalysis for making memory reference queries.
    AliasAnalysis *AA;

    /// KillIndices - The index of the most recent kill (proceeding bottom-up),
    /// or ~0u if the register is not live.
    unsigned KillIndices[TargetRegisterInfo::FirstVirtualRegister];

  public:
    SchedulePostRATDList(MachineFunction &MF,
                         const MachineLoopInfo &MLI,
                         const MachineDominatorTree &MDT,
                         ScheduleHazardRecognizer *HR,
                         AntiDepBreaker *ADB,
                         AliasAnalysis *aa)
      : ScheduleDAGInstrs(MF, MLI, MDT), Topo(SUnits),
        HazardRec(HR), AntiDepBreak(ADB), AA(aa) {}

    ~SchedulePostRATDList() {
      delete HazardRec;
      delete AntiDepBreak;
    }

    /// StartBlock - Initialize register live-range state for scheduling in
    /// this block.
    ///
    void StartBlock(MachineBasicBlock *BB);

    /// Schedule - Schedule the instruction range using list scheduling.
    ///
    void Schedule();

    /// Observe - Update liveness information to account for the current
    /// instruction, which will not be scheduled.
    ///
    void Observe(MachineInstr *MI, unsigned Count);

    /// FinishBlock - Clean up register live-range state.
    ///
    void FinishBlock();

    /// FixupKills - Fix register kill flags that have been made
    /// invalid due to scheduling.
    ///
    void FixupKills(MachineBasicBlock *MBB);

  private:
    void ReleaseSucc(SUnit *SU, SDep *SuccEdge, bool IgnoreAntiDep);
    void ReleaseSuccessors(SUnit *SU, bool IgnoreAntiDep);
    void ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle, bool IgnoreAntiDep);
    void ListScheduleTopDown(
      AntiDepBreaker::CandidateMap *AntiDepCandidates);
    void StartBlockForKills(MachineBasicBlock *BB);

    // ToggleKillFlag - Toggle a register operand kill flag. Other
    // adjustments may be made to the instruction if necessary. Return
    // true if the operand has been deleted, false if not.
    bool ToggleKillFlag(MachineInstr *MI, MachineOperand &MO);
  };
}

/// isSchedulingBoundary - Test if the given instruction should be
/// considered a scheduling boundary. This primarily includes labels
/// and terminators.
///
static bool isSchedulingBoundary(const MachineInstr *MI,
                                 const MachineFunction &MF) {
  // Terminators and labels can't be scheduled around.
  if (MI->getDesc().isTerminator() || MI->isLabel())
    return true;

  // Don't attempt to schedule around any instruction that modifies
  // a stack-oriented pointer, as it's unlikely to be profitable. This
  // saves compile time, because it doesn't require every single
  // stack slot reference to depend on the instruction that does the
  // modification.
  const TargetLowering &TLI = *MF.getTarget().getTargetLowering();
  if (MI->modifiesRegister(TLI.getStackPointerRegisterToSaveRestore()))
    return true;

  return false;
}

bool PostRAScheduler::runOnMachineFunction(MachineFunction &Fn) {
  AA = &getAnalysis<AliasAnalysis>();

  // Check for explicit enable/disable of post-ra scheduling.
  TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
  SmallVector<TargetRegisterClass*, 4> ExcludedRCs;
  if (EnablePostRAScheduler.getPosition() > 0) {
    if (!EnablePostRAScheduler)
      return false;
  } else {
    // Check that post-RA scheduling is enabled for this target.
    const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
    if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, ExcludedRCs))
      return false;
  }

  // Check for antidep breaking override...
  if (EnableAntiDepBreaking.getPosition() > 0) {
    AntiDepMode = (EnableAntiDepBreaking == "all") ? TargetSubtarget::ANTIDEP_ALL :
      (EnableAntiDepBreaking == "critical") ? TargetSubtarget::ANTIDEP_CRITICAL :
      TargetSubtarget::ANTIDEP_NONE;
  }

  DEBUG(errs() << "PostRAScheduler\n");

  const MachineLoopInfo &MLI = getAnalysis<MachineLoopInfo>();
  const MachineDominatorTree &MDT = getAnalysis<MachineDominatorTree>();
  const InstrItineraryData &InstrItins = Fn.getTarget().getInstrItineraryData();
  ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
    (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
    (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
  AntiDepBreaker *ADB =
    ((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
     (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, ExcludedRCs) :
     ((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
      (AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));

  SchedulePostRATDList Scheduler(Fn, MLI, MDT, HR, ADB, AA);

  // Loop over all of the basic blocks
  for (MachineFunction::iterator MBB = Fn.begin(), MBBe = Fn.end();
       MBB != MBBe; ++MBB) {
#ifndef NDEBUG
    // If DebugDiv > 0 then only schedule MBB with (ID % DebugDiv) == DebugMod
    if (DebugDiv > 0) {
      static int bbcnt = 0;
      if (bbcnt++ % DebugDiv != DebugMod)
        continue;
      errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
        ":BB#" << MBB->getNumber() << " ***\n";
    }
#endif

    // Initialize register live-range state for scheduling in this block.
    Scheduler.StartBlock(MBB);

    // Schedule each sequence of instructions not interrupted by a label
    // or anything else that effectively needs to shut down scheduling.
    MachineBasicBlock::iterator Current = MBB->end();
    unsigned Count = MBB->size(), CurrentCount = Count;
    for (MachineBasicBlock::iterator I = Current; I != MBB->begin(); ) {
      MachineInstr *MI = prior(I);
      if (isSchedulingBoundary(MI, Fn)) {
        Scheduler.Run(MBB, I, Current, CurrentCount);
        Scheduler.EmitSchedule(0);
        Current = MI;
        CurrentCount = Count - 1;
        Scheduler.Observe(MI, CurrentCount);
      }
      I = MI;
      --Count;
    }
    assert(Count == 0 && "Instruction count mismatch!");
    assert((MBB->begin() == Current || CurrentCount != 0) &&
           "Instruction count mismatch!");
    Scheduler.Run(MBB, MBB->begin(), Current, CurrentCount);
    Scheduler.EmitSchedule(0);

    // Clean up register live-range state.
    Scheduler.FinishBlock();

    // Update register kills.
    Scheduler.FixupKills(MBB);
  }

  return true;
}

/// StartBlock - Initialize register live-range state for scheduling in
/// this block.
///
void SchedulePostRATDList::StartBlock(MachineBasicBlock *BB) {
  // Call the superclass.
  ScheduleDAGInstrs::StartBlock(BB);

  // Reset the hazard recognizer and anti-dep breaker.
  HazardRec->Reset();
  if (AntiDepBreak != NULL)
    AntiDepBreak->StartBlock(BB);
}

/// Schedule - Schedule the instruction range using list scheduling.
///
void SchedulePostRATDList::Schedule() {
  // Build the scheduling graph.
  BuildSchedGraph(AA);

  if (AntiDepBreak != NULL) {
    AntiDepBreaker::CandidateMap AntiDepCandidates;
    const bool NeedCandidates = AntiDepBreak->NeedCandidates();

    for (unsigned i = 0, Trials = AntiDepBreak->GetMaxTrials();
         i < Trials; ++i) {
      DEBUG(errs() << "\n********** Break Anti-Deps, Trial " <<
            i << " **********\n");

      // If candidates are required, then schedule forward ignoring
      // anti-dependencies to collect the candidate operands for
      // anti-dependence breaking. The candidates will be the def
      // operands for the anti-dependencies that, if broken, would allow
      // an improved schedule.
      if (NeedCandidates) {
        DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
                SUnits[su].dumpAll(this));

        AntiDepCandidates.clear();
        AvailableQueue.initNodes(SUnits);
        ListScheduleTopDown(&AntiDepCandidates);
        AvailableQueue.releaseState();
      }

      unsigned Broken =
        AntiDepBreak->BreakAntiDependencies(SUnits, AntiDepCandidates,
                                            Begin, InsertPos, InsertPosIndex);

      // We made changes. Update the dependency graph.
      // Theoretically we could update the graph in place:
      // When a live range is changed to use a different register, remove
      // the def's anti-dependence *and* output-dependence edges due to
      // that register, and add new anti-dependence and output-dependence
      // edges based on the next live range of the register.
      if ((Broken != 0) || NeedCandidates) {
        SUnits.clear();
        Sequence.clear();
        EntrySU = SUnit();
        ExitSU = SUnit();
        BuildSchedGraph(AA);
      }

      NumFixedAnti += Broken;
      if (Broken == 0)
        break;
    }
  }

  DEBUG(errs() << "********** List Scheduling **********\n");
  DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
          SUnits[su].dumpAll(this));

  AvailableQueue.initNodes(SUnits);
  ListScheduleTopDown(NULL);
  AvailableQueue.releaseState();
}

/// Observe - Update liveness information to account for the current
/// instruction, which will not be scheduled.
///
void SchedulePostRATDList::Observe(MachineInstr *MI, unsigned Count) {
  if (AntiDepBreak != NULL)
    AntiDepBreak->Observe(MI, Count, InsertPosIndex);
}

/// FinishBlock - Clean up register live-range state.
///
void SchedulePostRATDList::FinishBlock() {
  if (AntiDepBreak != NULL)
    AntiDepBreak->FinishBlock();

  // Call the superclass.
  ScheduleDAGInstrs::FinishBlock();
}

/// StartBlockForKills - Initialize register live-range state for updating kills
///
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
  // Initialize the indices to indicate that no registers are live.
  std::fill(KillIndices, array_endof(KillIndices), ~0u);

  // Determine the live-out physregs for this block.
  if (!BB->empty() && BB->back().getDesc().isReturn()) {
    // In a return block, examine the function live-out regs.
    for (MachineRegisterInfo::liveout_iterator I = MRI.liveout_begin(),
           E = MRI.liveout_end(); I != E; ++I) {
      unsigned Reg = *I;
      KillIndices[Reg] = BB->size();
      // Repeat, for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = BB->size();
      }
    }
  }
  else {
    // In a non-return block, examine the live-in regs of all successors.
    for (MachineBasicBlock::succ_iterator SI = BB->succ_begin(),
           SE = BB->succ_end(); SI != SE; ++SI) {
      for (MachineBasicBlock::livein_iterator I = (*SI)->livein_begin(),
             E = (*SI)->livein_end(); I != E; ++I) {
        unsigned Reg = *I;
        KillIndices[Reg] = BB->size();
        // Repeat, for all subregs.
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          KillIndices[*Subreg] = BB->size();
        }
      }
    }
  }
}

bool SchedulePostRATDList::ToggleKillFlag(MachineInstr *MI,
                                          MachineOperand &MO) {
  // Setting kill flag...
  if (!MO.isKill()) {
    MO.setIsKill(true);
    return false;
  }

  // If MO itself is live, clear the kill flag...
  if (KillIndices[MO.getReg()] != ~0u) {
    MO.setIsKill(false);
    return false;
  }

  // If any subreg of MO is live, then create an imp-def for that
  // subreg and keep MO marked as killed.
  MO.setIsKill(false);
  bool AllDead = true;
  const unsigned SuperReg = MO.getReg();
  for (const unsigned *Subreg = TRI->getSubRegisters(SuperReg);
       *Subreg; ++Subreg) {
    if (KillIndices[*Subreg] != ~0u) {
      MI->addOperand(MachineOperand::CreateReg(*Subreg, true /*IsDef*/,
                                               true /*IsImp*/, false /*IsKill*/,
                                               false /*IsDead*/));
      AllDead = false;
    }
  }

  if (AllDead)
    MO.setIsKill(true);
  return false;
}

/// FixupKills - Fix the register kill flags; they may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
  DEBUG(errs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');

  std::set<unsigned> killedRegs;
  BitVector ReservedRegs = TRI->getReservedRegs(MF);

  StartBlockForKills(MBB);

  // Examine block from end to start...
  unsigned Count = MBB->size();
  for (MachineBasicBlock::iterator I = MBB->end(), E = MBB->begin();
       I != E; --Count) {
    MachineInstr *MI = --I;

    // Update liveness. Registers that are defed but not used in this
    // instruction are now dead. Mark register and all subregs as they
    // are completely defined.
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg()) continue;
      unsigned Reg = MO.getReg();
      if (Reg == 0) continue;
      if (!MO.isDef()) continue;
      // Ignore two-addr defs.
      if (MI->isRegTiedToUseOperand(i)) continue;

      KillIndices[Reg] = ~0u;

      // Repeat for all subregs.
      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg)
        KillIndices[*Subreg] = ~0u;
    }

    // Examine all used registers and set/clear kill flag. When a
    // register is used multiple times we only set the kill flag on
    // the first use.
    killedRegs.clear();
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      bool kill = false;
      if (killedRegs.find(Reg) == killedRegs.end()) {
        kill = true;
        // A register is not killed if any subregs are live...
        for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
             *Subreg; ++Subreg) {
          if (KillIndices[*Subreg] != ~0u) {
            kill = false;
            break;
          }
        }

        // If subreg is not live, then register is killed if it became
        // live in this instruction.
        if (kill)
          kill = (KillIndices[Reg] == ~0u);
      }

      if (MO.isKill() != kill) {
        bool removed = ToggleKillFlag(MI, MO);
        if (removed) {
          DEBUG(errs() << "Fixed <removed> in ");
        } else {
          DEBUG(errs() << "Fixed " << MO << " in ");
        }
        DEBUG(MI->dump());
      }

      killedRegs.insert(Reg);
    }

    // Mark any used register (that is not using undef) and subregs as
    // now live...
    for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI->getOperand(i);
      if (!MO.isReg() || !MO.isUse() || MO.isUndef()) continue;
      unsigned Reg = MO.getReg();
      if ((Reg == 0) || ReservedRegs.test(Reg)) continue;

      KillIndices[Reg] = Count;

      for (const unsigned *Subreg = TRI->getSubRegisters(Reg);
           *Subreg; ++Subreg) {
        KillIndices[*Subreg] = Count;
      }
    }
  }
}

//===----------------------------------------------------------------------===//
//  Top-Down Scheduling
//===----------------------------------------------------------------------===//

/// ReleaseSucc - Decrement the NumPredsLeft count of a successor. Add it to
/// the PendingQueue if the count reaches zero. Also update its cycle bound.
void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge,
                                       bool IgnoreAntiDep) {
  SUnit *SuccSU = SuccEdge->getSUnit();

#ifndef NDEBUG
  if (SuccSU->NumPredsLeft == 0) {
    errs() << "*** Scheduling failed! ***\n";
    SuccSU->dump(this);
    errs() << " has been released too many times!\n";
    llvm_unreachable(0);
  }
#endif
  --SuccSU->NumPredsLeft;

  // Compute how many cycles it will be before this actually becomes
  // available. This is the max of the start time of all predecessors plus
  // their latencies.
  SuccSU->setDepthToAtLeast(SU->getDepth(IgnoreAntiDep) +
                            SuccEdge->getLatency(), IgnoreAntiDep);

  // If all the node's predecessors are scheduled, this node is ready
  // to be scheduled. Ignore the special ExitSU node.
  if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
    PendingQueue.push_back(SuccSU);
}

/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU, bool IgnoreAntiDep) {
  for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
       I != E; ++I) {
    if (IgnoreAntiDep && (I->getKind() == SDep::Anti)) continue;
    ReleaseSucc(SU, &*I, IgnoreAntiDep);
  }
}

/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
void SchedulePostRATDList::ScheduleNodeTopDown(SUnit *SU, unsigned CurCycle,
                                               bool IgnoreAntiDep) {
  DEBUG(errs() << "*** Scheduling [" << CurCycle << "]: ");
  DEBUG(SU->dump(this));

  Sequence.push_back(SU);
  assert(CurCycle >= SU->getDepth(IgnoreAntiDep) &&
         "Node scheduled above its depth!");
  SU->setDepthToAtLeast(CurCycle, IgnoreAntiDep);

  ReleaseSuccessors(SU, IgnoreAntiDep);
  SU->isScheduled = true;
  AvailableQueue.ScheduledNode(SU);
}

/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown(
  AntiDepBreaker::CandidateMap *AntiDepCandidates) {
  unsigned CurCycle = 0;
  const bool IgnoreAntiDep = (AntiDepCandidates != NULL);

  // We're scheduling top-down but we're visiting the regions in
  // bottom-up order, so we don't know the hazards at the start of a
  // region. So assume no hazards (this should usually be ok as most
  // blocks are a single region).
  HazardRec->Reset();

  // If ignoring anti-dependencies, the Schedule DAG still has Anti
  // dep edges, but we ignore them for scheduling purposes.
  AvailableQueue.setIgnoreAntiDep(IgnoreAntiDep);

  // Release any successors of the special Entry node.
  ReleaseSuccessors(&EntrySU, IgnoreAntiDep);

  // Add all leaves to Available queue. If ignoring antideps we also
  // adjust the predecessor count for each node to not include antidep
  // edges.
  for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
    // It is available if it has no predecessors.
    bool available = SUnits[i].Preds.empty();
    // If we are ignoring anti-dependencies then a node that has only
    // anti-dep predecessors is available.
    if (!available && IgnoreAntiDep) {
      available = true;
      for (SUnit::const_pred_iterator I = SUnits[i].Preds.begin(),
             E = SUnits[i].Preds.end(); I != E; ++I) {
        if (I->getKind() != SDep::Anti) {
          available = false;
        } else {
          SUnits[i].NumPredsLeft -= 1;
        }
      }
    }

    if (available) {
      AvailableQueue.push(&SUnits[i]);
      SUnits[i].isAvailable = true;
    }
  }

  // In any cycle where we can't schedule any instructions, we must
  // stall or emit a noop, depending on the target.
  bool CycleHasInsts = false;

  // While Available queue is not empty, grab the node with the highest
  // priority. If it is not ready put it back. Schedule the node.
  std::vector<SUnit*> NotReady;
  Sequence.reserve(SUnits.size());
  while (!AvailableQueue.empty() || !PendingQueue.empty()) {
    // Check to see if any of the pending instructions are ready to issue. If
    // so, add them to the available queue.
    unsigned MinDepth = ~0u;
    for (unsigned i = 0, e = PendingQueue.size(); i != e; ++i) {
      if (PendingQueue[i]->getDepth(IgnoreAntiDep) <= CurCycle) {
        AvailableQueue.push(PendingQueue[i]);
        PendingQueue[i]->isAvailable = true;
        PendingQueue[i] = PendingQueue.back();
        PendingQueue.pop_back();
        --i; --e;
      } else if (PendingQueue[i]->getDepth(IgnoreAntiDep) < MinDepth)
        MinDepth = PendingQueue[i]->getDepth(IgnoreAntiDep);
    }

    DEBUG(errs() << "\n*** Examining Available\n";
          LatencyPriorityQueue q = AvailableQueue;
          while (!q.empty()) {
            SUnit *su = q.pop();
            errs() << "Height " << su->getHeight(IgnoreAntiDep) << ": ";
            su->dump(this);
          });

    SUnit *FoundSUnit = 0;
    bool HasNoopHazards = false;
    while (!AvailableQueue.empty()) {
      SUnit *CurSUnit = AvailableQueue.pop();

      ScheduleHazardRecognizer::HazardType HT =
        HazardRec->getHazardType(CurSUnit);
      if (HT == ScheduleHazardRecognizer::NoHazard) {
        FoundSUnit = CurSUnit;
        break;
      }

      // Remember if this is a noop hazard.
      HasNoopHazards |= HT == ScheduleHazardRecognizer::NoopHazard;

      NotReady.push_back(CurSUnit);
    }

    // Add the nodes that aren't ready back onto the available list.
    if (!NotReady.empty()) {
      AvailableQueue.push_all(NotReady);
      NotReady.clear();
    }

    // If we found a node to schedule...
    if (FoundSUnit) {
      // If we are ignoring anti-dependencies and the SUnit we are
      // scheduling has an antidep predecessor that has not been
      // scheduled, then we will need to break that antidep if we want
      // to get this schedule when not ignoring anti-dependencies.
      if (IgnoreAntiDep) {
        AntiDepBreaker::AntiDepRegVector AntiDepRegs;
        for (SUnit::const_pred_iterator I = FoundSUnit->Preds.begin(),
               E = FoundSUnit->Preds.end(); I != E; ++I) {
          if ((I->getKind() == SDep::Anti) && !I->getSUnit()->isScheduled)
            AntiDepRegs.push_back(I->getReg());
        }

        if (AntiDepRegs.size() > 0) {
          DEBUG(errs() << "*** AntiDep Candidate: ");
          DEBUG(FoundSUnit->dump(this));
          AntiDepCandidates->insert(
            AntiDepBreaker::CandidateMap::value_type(FoundSUnit, AntiDepRegs));
        }
      }

      // ... schedule the node...
      ScheduleNodeTopDown(FoundSUnit, CurCycle, IgnoreAntiDep);
      HazardRec->EmitInstruction(FoundSUnit);
      CycleHasInsts = true;

      // If we are using the target-specific hazards, then don't
      // advance the cycle time just because we schedule a node. If
      // the target allows it we can schedule multiple nodes in the
      // same cycle.
      if (!EnablePostRAHazardAvoidance) {
        if (FoundSUnit->Latency)  // Don't increment CurCycle for pseudo-ops!
          ++CurCycle;
      }
    } else {
      if (CycleHasInsts) {
        DEBUG(errs() << "*** Finished cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
      } else if (!HasNoopHazards) {
        // Otherwise, we have a pipeline stall, but no other problem,
        // just advance the current cycle and try again.
        DEBUG(errs() << "*** Stall in cycle " << CurCycle << '\n');
        HazardRec->AdvanceCycle();
        ++NumStalls;
      } else {
        // Otherwise, we have no instructions to issue and we have instructions
        // that will fault if we don't do this right. This is the case for
        // processors without pipeline interlocks and other cases.
        DEBUG(errs() << "*** Emitting noop in cycle " << CurCycle << '\n');
        HazardRec->EmitNoop();
        Sequence.push_back(0);   // NULL here means noop
        ++NumNoops;
      }

      ++CurCycle;
      CycleHasInsts = false;
    }
  }

#ifndef NDEBUG
  VerifySchedule(/*isBottomUp=*/false);
#endif
}

//===----------------------------------------------------------------------===//
//  Public Constructor Functions
//===----------------------------------------------------------------------===//
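
// Typical use (illustrative): a target's codegen pipeline adds this pass
// after register allocation, e.g. PM.add(createPostRAScheduler(OptLevel)).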
FunctionPass *llvm::createPostRAScheduler(CodeGenOpt::Level OptLevel) {
  return new PostRAScheduler(OptLevel);
}