// Check for explicit enable/disable of post-ra scheduling.
TargetSubtarget::AntiDepBreakMode AntiDepMode = TargetSubtarget::ANTIDEP_NONE;
+ SmallVector<TargetRegisterClass*, 4> CriticalPathRCs;
if (EnablePostRAScheduler.getPosition() > 0) {
if (!EnablePostRAScheduler)
return false;
} else {
// Check that post-RA scheduling is enabled for this target.
const TargetSubtarget &ST = Fn.getTarget().getSubtarget<TargetSubtarget>();
- if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode))
+ if (!ST.enablePostRAScheduler(OptLevel, AntiDepMode, CriticalPathRCs))
return false;
}
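// Illustrative only (a sketch, not part of this patch): a target opts in to
// post-RA scheduling and nominates critical-path register classes by
// overriding TargetSubtarget::enablePostRAScheduler, roughly:
//
//   bool MySubtarget::enablePostRAScheduler(       // hypothetical target
//       CodeGenOpt::Level OptLevel,
//       TargetSubtarget::AntiDepBreakMode &Mode,
//       RegClassVector &CriticalPathRCs) const {
//     Mode = TargetSubtarget::ANTIDEP_CRITICAL;
//     CriticalPathRCs.clear();
//     CriticalPathRCs.push_back(&MyTarget::GPRRegClass); // hypothetical class
//     return OptLevel >= CodeGenOpt::Default;
//   }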
// Select the hazard recognizer; InstrItins comes from the target
// (its initialization is elided in this excerpt).
ScheduleHazardRecognizer *HR = EnablePostRAHazardAvoidance ?
  (ScheduleHazardRecognizer *)new ExactHazardRecognizer(InstrItins) :
  (ScheduleHazardRecognizer *)new SimpleHazardRecognizer();
AntiDepBreaker *ADB =
((AntiDepMode == TargetSubtarget::ANTIDEP_ALL) ?
- (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn) :
+ (AntiDepBreaker *)new AggressiveAntiDepBreaker(Fn, CriticalPathRCs) :
((AntiDepMode == TargetSubtarget::ANTIDEP_CRITICAL) ?
(AntiDepBreaker *)new CriticalAntiDepBreaker(Fn) : NULL));
if (bbcnt++ % DebugDiv != DebugMod)
continue;
errs() << "*** DEBUG scheduling " << Fn.getFunction()->getNameStr() <<
- ":MBB ID#" << MBB->getNumber() << " ***\n";
+ ":BB#" << MBB->getNumber() << " ***\n";
}
#endif
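// Usage sketch for the block-bisection debug aid above (flag names assumed
// from this file's cl::opts; requires an assertions-enabled build):
//
//   llc -postra-sched-debugdiv=4 -postra-sched-debugmod=1 foo.ll
//
// This schedules only the blocks whose running count is congruent to 1
// mod 4, which helps isolate a block that is miscompiled by scheduling.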
BuildSchedGraph(AA);
if (AntiDepBreak != NULL) {
- for (unsigned i = 0, Trials = AntiDepBreak->GetMaxTrials();
- i < Trials; ++i) {
- DEBUG(errs() << "********** Break Anti-Deps, Trial " <<
- i << " **********\n");
- unsigned Broken =
- AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
- InsertPosIndex);
- if (Broken == 0)
- break;
-
+ unsigned Broken =
+ AntiDepBreak->BreakAntiDependencies(SUnits, Begin, InsertPos,
+ InsertPosIndex);
+
+ if (Broken != 0) {
// We made changes. Update the dependency graph.
// Theoretically we could update the graph in place:
// When a live range is changed to use a different register, remove
// that register, and add new anti-dependence and output-dependence
// edges based on the next live range of the register.
SUnits.clear();
+ Sequence.clear();
EntrySU = SUnit();
ExitSU = SUnit();
BuildSchedGraph(AA);
-
+
NumFixedAnti += Broken;
}
}
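// For illustration (hypothetical pseudocode; UsesReg and the edge helpers
// below are not real APIs): the in-place alternative described above would
// look roughly like
//
//   for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
//     if (UsesReg(SUnits[su], RenamedReg)) {
//       RemoveAntiDepEdges(SUnits[su], RenamedReg);
//       AddDepsForNextLiveRange(SUnits[su], RenamedReg);
//     }
//
// Rebuilding the graph from scratch, as done above, is simpler and less
// error-prone.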
DEBUG(errs() << "********** List Scheduling **********\n");
-
DEBUG(for (unsigned su = 0, e = SUnits.size(); su != e; ++su)
SUnits[su].dumpAll(this));
AvailableQueue.initNodes(SUnits);
-
ListScheduleTopDown();
-
AvailableQueue.releaseState();
}
/// StartBlockForKills - Initialize register live-range state for updating
/// kills.
void SchedulePostRATDList::StartBlockForKills(MachineBasicBlock *BB) {
// Initialize the indices to indicate that no registers are live.
- std::fill(KillIndices, array_endof(KillIndices), ~0u);
+ for (unsigned i = 0; i < TRI->getNumRegs(); ++i)
+ KillIndices[i] = ~0u;
// Determine the live-out physregs for this block.
if (!BB->empty() && BB->back().getDesc().isReturn()) {
/// FixupKills - Fix register kill flags that may have been made
/// incorrect by instruction reordering.
///
void SchedulePostRATDList::FixupKills(MachineBasicBlock *MBB) {
- DEBUG(errs() << "Fixup kills for BB ID#" << MBB->getNumber() << '\n');
+ DEBUG(errs() << "Fixup kills for BB#" << MBB->getNumber() << '\n');
std::set<unsigned> killedRegs;
BitVector ReservedRegs = TRI->getReservedRegs(MF);
}
if (MO.isKill() != kill) {
- bool removed = ToggleKillFlag(MI, MO);
- if (removed) {
- DEBUG(errs() << "Fixed <removed> in ");
- } else {
- DEBUG(errs() << "Fixed " << MO << " in ");
- }
+ DEBUG(errs() << "Fixing " << MO << " in ");
+ // Warning: ToggleKillFlag may invalidate MO.
+ ToggleKillFlag(MI, MO);
DEBUG(MI->dump());
}
/// ReleaseSuccessors - Call ReleaseSucc on each of SU's successors.
void SchedulePostRATDList::ReleaseSuccessors(SUnit *SU) {
for (SUnit::succ_iterator I = SU->Succs.begin(), E = SU->Succs.end();
- I != E; ++I)
+ I != E; ++I) {
ReleaseSucc(SU, &*I);
+ }
}
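// For reference, a sketch of the ReleaseSucc helper invoked above; the real
// definition appears earlier in this file, and this only approximates it:
//
//   void SchedulePostRATDList::ReleaseSucc(SUnit *SU, SDep *SuccEdge) {
//     SUnit *SuccSU = SuccEdge->getSUnit();
//     --SuccSU->NumPredsLeft;  // one fewer unscheduled predecessor
//     // The successor cannot start before this node issues and its
//     // latency elapses.
//     SuccSU->setDepthToAtLeast(SU->getDepth() + SuccEdge->getLatency());
//     // Once every predecessor is scheduled, the successor becomes ready.
//     if (SuccSU->NumPredsLeft == 0 && SuccSU != &ExitSU)
//       PendingQueue.push_back(SuccSU);
//   }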
/// ScheduleNodeTopDown - Add the node to the schedule. Decrement the pending
/// count of its successors. If a successor pending count is zero, add it to
/// the Available queue.
DEBUG(SU->dump(this));
Sequence.push_back(SU);
- assert(CurCycle >= SU->getDepth() && "Node scheduled above its depth!");
+ assert(CurCycle >= SU->getDepth() &&
+ "Node scheduled above its depth!");
SU->setDepthToAtLeast(CurCycle);
ReleaseSuccessors(SU);
/// ListScheduleTopDown - The main loop of list scheduling for top-down
/// schedulers.
void SchedulePostRATDList::ListScheduleTopDown() {
unsigned CurCycle = 0;
+
+ // We're scheduling top-down but we're visiting the regions in
+ // bottom-up order, so we don't know the hazards at the start of a
+ // region. So assume no hazards (this should usually be ok as most
+ // blocks are a single region).
+ HazardRec->Reset();
// Release any successors of the special Entry node.
ReleaseSuccessors(&EntrySU);
- // All leaves to Available queue.
+ // Add all leaves to Available queue.
for (unsigned i = 0, e = SUnits.size(); i != e; ++i) {
// It is available if it has no predecessors.
- if (SUnits[i].Preds.empty()) {
+ bool available = SUnits[i].Preds.empty();
+ if (available) {
AvailableQueue.push(&SUnits[i]);
SUnits[i].isAvailable = true;
}
});
SUnit *FoundSUnit = 0;
-
bool HasNoopHazards = false;
while (!AvailableQueue.empty()) {
SUnit *CurSUnit = AvailableQueue.pop();
NotReady.clear();
}
- // If we found a node to schedule, do it now.
+ // If we found a node to schedule...
if (FoundSUnit) {
+ // ... schedule the node...
ScheduleNodeTopDown(FoundSUnit, CurCycle);
HazardRec->EmitInstruction(FoundSUnit);
CycleHasInsts = true;