ScheduleDAGInstrs::ScheduleDAGInstrs(MachineFunction &mf,
const MachineLoopInfo &mli,
const MachineDominatorTree &mdt)
- : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {}
+ : ScheduleDAG(mf), MLI(mli), MDT(mdt), LoopRegs(MLI, MDT) {
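+ // Cache the frame info; it is needed to classify PseudoSourceValue memory
+ // operands when building chain dependencies.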
+ MFI = mf.getFrameInfo();
+ DbgValueVec.clear();
+}
/// Run - perform scheduling.
///
} else {
return V;
}
- assert(isa<IntegerType>(V->getType()) && "Unexpected operand type!");
+ assert(V->getType()->isIntegerTy() && "Unexpected operand type!");
} while (1);
}
break;
const Value *O = getUnderlyingObjectFromInt(cast<User>(V)->getOperand(0));
// If that succeeded in finding a pointer, continue the search.
- if (!isa<PointerType>(O->getType()))
+ if (!O->getType()->isPointerTy())
break;
V = O;
} while (1);
/// getUnderlyingObjectForInstr - If this machine instr has memory reference
/// information and it can be tracked to a normal reference to a known
/// object, return the Value for that object. Otherwise return null.
-static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI) {
+static const Value *getUnderlyingObjectForInstr(const MachineInstr *MI,
+ const MachineFrameInfo *MFI,
+ bool &MayAlias) {
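+ // MayAlias is set to false only when the reference is to a PseudoSourceValue
+ // known not to alias LLVM IR values (e.g. a spill slot or constant-pool
+ // entry); callers may then track it separately from possibly-aliasing
+ // references.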
+ MayAlias = true;
if (!MI->hasOneMemOperand() ||
!(*MI->memoperands_begin())->getValue() ||
(*MI->memoperands_begin())->isVolatile())
return 0;
V = getUnderlyingObject(V);
- if (!isa<PseudoSourceValue>(V) && !isIdentifiedObject(V))
- return 0;
+ if (const PseudoSourceValue *PSV = dyn_cast<PseudoSourceValue>(V)) {
+ // For now, ignore PseudoSourceValues which may alias LLVM IR values
+ // because the code that uses this function has no way to cope with
+ // such aliases.
+ if (PSV->isAliased(MFI))
+ return 0;
+
+ MayAlias = PSV->mayAlias(MFI);
+ return V;
+ }
- return V;
+ if (isIdentifiedObject(V))
+ return V;
+
+ return 0;
}
void ScheduleDAGInstrs::StartBlock(MachineBasicBlock *BB) {
}
}
-void ScheduleDAGInstrs::BuildSchedGraph() {
+void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) {
// We'll be allocating one SUnit for each instruction, plus one for
// the region exit node.
SUnits.reserve(BB->size());
// We build scheduling units by walking a block's instruction list from bottom
// to top.
- // Remember where a generic side-effecting instruction is as we procede. If
- // ChainMMO is null, this is assumed to have arbitrary side-effects. If
- // ChainMMO is non-null, then Chain makes only a single memory reference.
- SUnit *Chain = 0;
- MachineMemOperand *ChainMMO = 0;
+ // Remember where a generic side-effecting instruction is as we proceed.
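+ // BarrierChain is the most recent instruction that must be ordered against
+ // all memory references (calls, unmodeled side effects, volatile accesses);
+ // AliasChain is the most recent instruction through which all possibly
+ // aliasing memory references are chained.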
+ SUnit *BarrierChain = 0, *AliasChain = 0;
- // Memory references to specific known memory locations are tracked so that
- // they can be given more precise dependencies.
- std::map<const Value *, SUnit *> MemDefs;
- std::map<const Value *, std::vector<SUnit *> > MemUses;
+ // Memory references to specific known memory locations are tracked
+ // so that they can be given more precise dependencies. We track
+ // separately the known memory locations that may alias and those
+ // that are known not to alias.
+ std::map<const Value *, SUnit *> AliasMemDefs, NonAliasMemDefs;
+ std::map<const Value *, std::vector<SUnit *> > AliasMemUses, NonAliasMemUses;
+
+ // Keep track of dangling debug references to registers.
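+ // Each entry maps a physical register to the most recent DBG_VALUE naming
+ // it, along with that DBG_VALUE's index in DbgValueVec, so the DBG_VALUE can
+ // later be attached to the instruction that defines the register.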
+ std::pair<MachineInstr*, unsigned>
+ DanglingDebugValue[TargetRegisterInfo::FirstVirtualRegister];
// Check to see if the scheduler cares about latencies.
bool UnitLatencies = ForceUnitLatencies();
const TargetSubtarget &ST = TM.getSubtarget<TargetSubtarget>();
unsigned SpecialAddressLatency = ST.getSpecialAddressLatency();
+ // Remove any stale debug info; sometimes BuildSchedGraph is called again
+ // without emitting the info from the previous call.
+ DbgValueVec.clear();
+ std::memset(DanglingDebugValue, 0, sizeof(DanglingDebugValue));
+
// Walk the list of instructions, from bottom moving up.
for (MachineBasicBlock::iterator MII = InsertPos, MIE = Begin;
MII != MIE; --MII) {
MachineInstr *MI = prior(MII);
+ // DBG_VALUE instructions do not have SUnits built, so just remember them for
+ // later reinsertion.
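+ // Register-valued DBG_VALUEs are also recorded in DanglingDebugValue so they
+ // can be reattached to the instruction defining the register; see the
+ // MO.isDef() handling below.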
+ if (MI->isDebugValue()) {
+ if (MI->getNumOperands() == 3 && MI->getOperand(0).isReg() &&
+ MI->getOperand(0).getReg())
+ DanglingDebugValue[MI->getOperand(0).getReg()] =
+ std::make_pair(MI, DbgValueVec.size());
+ DbgValueVec.push_back(MI);
+ continue;
+ }
const TargetInstrDesc &TID = MI->getDesc();
assert(!TID.isTerminator() && !MI->isLabel() &&
"Cannot schedule terminators or labels!");
if (Reg == 0) continue;
assert(TRI->isPhysicalRegister(Reg) && "Virtual register encountered!");
+
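+ // If a DBG_VALUE below named this register, attach it to this SUnit so it
+ // follows the def, and drop it from the top-of-block list.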
+ if (MO.isDef() && DanglingDebugValue[Reg].first != 0) {
+ SU->DbgInstrList.push_back(DanglingDebugValue[Reg].first);
+ DbgValueVec[DanglingDebugValue[Reg].second] = 0;
+ DanglingDebugValue[Reg] = std::make_pair((MachineInstr*)0, 0);
+ }
+
std::vector<SUnit *> &UseList = Uses[Reg];
std::vector<SUnit *> &DefList = Defs[Reg];
// Optionally add output and anti dependencies. For anti
SUnit *DefSU = DefList[i];
if (DefSU != SU &&
(Kind != SDep::Output || !MO.isDead() ||
- !DefSU->getInstr()->registerDefIsDead(Reg)))
+ !DefSU->getInstr()->registerDefIsDead(*Alias)))
DefSU->addPred(SDep(SU, Kind, AOLatency, /*Reg=*/ *Alias));
}
}
unsigned DataLatency = SU->Latency;
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i];
- if (UseSU != SU) {
- unsigned LDataLatency = DataLatency;
- // Optionally add in a special extra latency for nodes that
- // feed addresses.
- // TODO: Do this for register aliases too.
- // TODO: Perhaps we should get rid of
- // SpecialAddressLatency and just move this into
- // adjustSchedDependency for the targets that care about
- // it.
- if (SpecialAddressLatency != 0 && !UnitLatencies) {
- MachineInstr *UseMI = UseSU->getInstr();
- const TargetInstrDesc &UseTID = UseMI->getDesc();
- int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
- assert(RegUseIndex >= 0 && "UseMI doesn's use register!");
- if ((UseTID.mayLoad() || UseTID.mayStore()) &&
- (unsigned)RegUseIndex < UseTID.getNumOperands() &&
- UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
- LDataLatency += SpecialAddressLatency;
- }
- // Adjust the dependence latency using operand def/use
- // information (if any), and then allow the target to
- // perform its own adjustments.
- const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
- if (!UnitLatencies) {
- ComputeOperandLatency(SU, UseSU, (SDep &)dep);
- ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
- }
- UseSU->addPred(dep);
+ if (UseSU == SU)
+ continue;
+ unsigned LDataLatency = DataLatency;
+ // Optionally add in a special extra latency for nodes that
+ // feed addresses.
+ // TODO: Do this for register aliases too.
+ // TODO: Perhaps we should get rid of
+ // SpecialAddressLatency and just move this into
+ // adjustSchedDependency for the targets that care about it.
+ if (SpecialAddressLatency != 0 && !UnitLatencies) {
+ MachineInstr *UseMI = UseSU->getInstr();
+ const TargetInstrDesc &UseTID = UseMI->getDesc();
+ int RegUseIndex = UseMI->findRegisterUseOperandIdx(Reg);
+ assert(RegUseIndex >= 0 && "UseMI doesn't use register!");
+ if ((UseTID.mayLoad() || UseTID.mayStore()) &&
+ (unsigned)RegUseIndex < UseTID.getNumOperands() &&
+ UseTID.OpInfo[RegUseIndex].isLookupPtrRegClass())
+ LDataLatency += SpecialAddressLatency;
+ }
+ // Adjust the dependence latency using operand def/use
+ // information (if any), and then allow the target to
+ // perform its own adjustments.
+ const SDep& dep = SDep(SU, SDep::Data, LDataLatency, Reg);
+ if (!UnitLatencies) {
+ ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
+ ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
}
+ UseSU->addPred(dep);
}
for (const unsigned *Alias = TRI->getAliasSet(Reg); *Alias; ++Alias) {
std::vector<SUnit *> &UseList = Uses[*Alias];
for (unsigned i = 0, e = UseList.size(); i != e; ++i) {
SUnit *UseSU = UseList[i];
- if (UseSU != SU) {
- const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
- if (!UnitLatencies) {
- ComputeOperandLatency(SU, UseSU, (SDep &)dep);
- ST.adjustSchedDependency(SU, UseSU, (SDep &)dep);
- }
- UseSU->addPred(dep);
+ if (UseSU == SU)
+ continue;
+ const SDep& dep = SDep(SU, SDep::Data, DataLatency, *Alias);
+ if (!UnitLatencies) {
+ ComputeOperandLatency(SU, UseSU, const_cast<SDep &>(dep));
+ ST.adjustSchedDependency(SU, UseSU, const_cast<SDep &>(dep));
}
+ UseSU->addPred(dep);
}
}
}
// Add chain dependencies.
+ // Chain dependencies used to enforce memory order should have
+ // latency of 0 (except for true dependency of Store followed by
+ // aliased Load... we estimate that with a single cycle of latency
+ // assuming the hardware will bypass)
// Note that isStoreToStackSlot and isLoadFromStackSlot are not usable
// after stack slots are lowered to actual addresses.
// TODO: Use an AliasAnalysis and do real alias-analysis queries, and
// produce more precise dependence information.
- if (TID.isCall() || TID.hasUnmodeledSideEffects()) {
- new_chain:
- // This is the conservative case. Add dependencies on all memory
- // references.
- if (Chain)
- Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
- Chain = SU;
+#define STORE_LOAD_LATENCY 1
+ unsigned TrueMemOrderLatency = 0;
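+ // TrueMemOrderLatency is used on order edges from this instruction to later
+ // loads; it is set to STORE_LOAD_LATENCY in the mayStore() path below,
+ // modeling the store-followed-by-aliased-load bypass described above.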
+ if (TID.isCall() || TID.hasUnmodeledSideEffects() ||
+ (MI->hasVolatileMemoryRef() &&
+ (!TID.mayLoad() || !MI->isInvariantLoad(AA)))) {
+ // Be conservative with these and add dependencies on all memory
+ // references, even those that are known to not alias.
+ for (std::map<const Value *, SUnit *>::iterator I =
+ NonAliasMemDefs.begin(), E = NonAliasMemDefs.end(); I != E; ++I) {
+ I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ }
+ for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
+ NonAliasMemUses.begin(), E = NonAliasMemUses.end(); I != E; ++I) {
+ for (unsigned i = 0, e = I->second.size(); i != e; ++i)
+ I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
+ }
+ NonAliasMemDefs.clear();
+ NonAliasMemUses.clear();
+ // Add SU to the barrier chain.
+ if (BarrierChain)
+ BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ BarrierChain = SU;
+
+ // fall-through
+ new_alias_chain:
+ // Chain all possibly aliasing memory references though SU.
+ if (AliasChain)
+ AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ AliasChain = SU;
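+ // Loads with no identifiable underlying object (PendingLoads) must also be
+ // ordered after this instruction.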
for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
- PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
- PendingLoads.clear();
- for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
- E = MemDefs.end(); I != E; ++I) {
- I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
- I->second = SU;
+ PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
+ for (std::map<const Value *, SUnit *>::iterator I = AliasMemDefs.begin(),
+ E = AliasMemDefs.end(); I != E; ++I) {
+ I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
}
for (std::map<const Value *, std::vector<SUnit *> >::iterator I =
- MemUses.begin(), E = MemUses.end(); I != E; ++I) {
+ AliasMemUses.begin(), E = AliasMemUses.end(); I != E; ++I) {
for (unsigned i = 0, e = I->second.size(); i != e; ++i)
- I->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency));
- I->second.clear();
+ I->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
}
- // See if it is known to just have a single memory reference.
- MachineInstr *ChainMI = Chain->getInstr();
- const TargetInstrDesc &ChainTID = ChainMI->getDesc();
- if (!ChainTID.isCall() &&
- !ChainTID.hasUnmodeledSideEffects() &&
- ChainMI->hasOneMemOperand() &&
- !(*ChainMI->memoperands_begin())->isVolatile() &&
- (*ChainMI->memoperands_begin())->getValue())
- // We know that the Chain accesses one specific memory location.
- ChainMMO = *ChainMI->memoperands_begin();
- else
- // Unknown memory accesses. Assume the worst.
- ChainMMO = 0;
+ PendingLoads.clear();
+ AliasMemDefs.clear();
+ AliasMemUses.clear();
} else if (TID.mayStore()) {
- if (const Value *V = getUnderlyingObjectForInstr(MI)) {
+ bool MayAlias = true;
+ TrueMemOrderLatency = STORE_LOAD_LATENCY;
+ if (const Value *V = getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
// A store to a specific PseudoSourceValue. Add precise dependencies.
- // Handle the def in MemDefs, if there is one.
- std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
- if (I != MemDefs.end()) {
- I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
+ // Record the def in MemDefs, first adding a dep if there is
+ // an existing def.
+ std::map<const Value *, SUnit *>::iterator I =
+ ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
+ std::map<const Value *, SUnit *>::iterator IE =
+ ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
+ if (I != IE) {
+ I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
/*isNormalMemory=*/true));
I->second = SU;
} else {
- MemDefs[V] = SU;
+ if (MayAlias)
+ AliasMemDefs[V] = SU;
+ else
+ NonAliasMemDefs[V] = SU;
}
// Handle the uses in MemUses, if there are any.
std::map<const Value *, std::vector<SUnit *> >::iterator J =
- MemUses.find(V);
- if (J != MemUses.end()) {
+ ((MayAlias) ? AliasMemUses.find(V) : NonAliasMemUses.find(V));
+ std::map<const Value *, std::vector<SUnit *> >::iterator JE =
+ ((MayAlias) ? AliasMemUses.end() : NonAliasMemUses.end());
+ if (J != JE) {
for (unsigned i = 0, e = J->second.size(); i != e; ++i)
- J->second[i]->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
- /*isNormalMemory=*/true));
+ J->second[i]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency,
+ /*Reg=*/0, /*isNormalMemory=*/true));
J->second.clear();
}
- // Add dependencies from all the PendingLoads, since without
- // memoperands we must assume they alias anything.
- for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
- PendingLoads[k]->addPred(SDep(SU, SDep::Order, SU->Latency));
- // Add a general dependence too, if needed.
- if (Chain)
- Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
- } else
+ if (MayAlias) {
+ // Add dependencies from all the PendingLoads, i.e. loads
+ // with no underlying object.
+ for (unsigned k = 0, m = PendingLoads.size(); k != m; ++k)
+ PendingLoads[k]->addPred(SDep(SU, SDep::Order, TrueMemOrderLatency));
+ // Add dependence on alias chain, if needed.
+ if (AliasChain)
+ AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ }
+ // Add dependence on barrier chain, if needed.
+ if (BarrierChain)
+ BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ } else {
// Treat all other stores conservatively.
- goto new_chain;
+ goto new_alias_chain;
+ }
} else if (TID.mayLoad()) {
- if (TII->isInvariantLoad(MI)) {
+ bool MayAlias = true;
+ TrueMemOrderLatency = 0;
+ if (MI->isInvariantLoad(AA)) {
// Invariant load, no chain dependencies needed!
- } else if (const Value *V = getUnderlyingObjectForInstr(MI)) {
- // A load from a specific PseudoSourceValue. Add precise dependencies.
- std::map<const Value *, SUnit *>::iterator I = MemDefs.find(V);
- if (I != MemDefs.end())
- I->second->addPred(SDep(SU, SDep::Order, SU->Latency, /*Reg=*/0,
- /*isNormalMemory=*/true));
- MemUses[V].push_back(SU);
-
- // Add a general dependence too, if needed.
- if (Chain && (!ChainMMO ||
- (ChainMMO->isStore() || ChainMMO->isVolatile())))
- Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
- } else if (MI->hasVolatileMemoryRef()) {
- // Treat volatile loads conservatively. Note that this includes
- // cases where memoperand information is unavailable.
- goto new_chain;
} else {
- // A normal load. Depend on the general chain, as well as on
- // all stores. In the absense of MachineMemOperand information,
- // we can't even assume that the load doesn't alias well-behaved
- // memory locations.
- if (Chain)
- Chain->addPred(SDep(SU, SDep::Order, SU->Latency));
- for (std::map<const Value *, SUnit *>::iterator I = MemDefs.begin(),
- E = MemDefs.end(); I != E; ++I)
- I->second->addPred(SDep(SU, SDep::Order, SU->Latency));
- PendingLoads.push_back(SU);
- }
+ if (const Value *V =
+ getUnderlyingObjectForInstr(MI, MFI, MayAlias)) {
+ // A load from a specific PseudoSourceValue. Add precise dependencies.
+ std::map<const Value *, SUnit *>::iterator I =
+ ((MayAlias) ? AliasMemDefs.find(V) : NonAliasMemDefs.find(V));
+ std::map<const Value *, SUnit *>::iterator IE =
+ ((MayAlias) ? AliasMemDefs.end() : NonAliasMemDefs.end());
+ if (I != IE)
+ I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0, /*Reg=*/0,
+ /*isNormalMemory=*/true));
+ if (MayAlias)
+ AliasMemUses[V].push_back(SU);
+ else
+ NonAliasMemUses[V].push_back(SU);
+ } else {
+ // A load with no underlying object. Depend on all
+ // potentially aliasing stores.
+ for (std::map<const Value *, SUnit *>::iterator I =
+ AliasMemDefs.begin(), E = AliasMemDefs.end(); I != E; ++I)
+ I->second->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+
+ PendingLoads.push_back(SU);
+ MayAlias = true;
+ }
+
+ // Add dependencies on alias and barrier chains, if needed.
+ if (MayAlias && AliasChain)
+ AliasChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ if (BarrierChain)
+ BarrierChain->addPred(SDep(SU, SDep::Order, /*Latency=*/0));
+ }
}
}
MachineInstr *DefMI = Def->getInstr();
int DefIdx = DefMI->findRegisterDefOperandIdx(Reg);
if (DefIdx != -1) {
- int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(), DefIdx);
+ int DefCycle = InstrItins.getOperandCycle(DefMI->getDesc().getSchedClass(),
+ DefIdx);
if (DefCycle >= 0) {
MachineInstr *UseMI = Use->getInstr();
const unsigned UseClass = UseMI->getDesc().getSchedClass();
}
// EmitSchedule - Emit the machine code in scheduled order.
-MachineBasicBlock *ScheduleDAGInstrs::
-EmitSchedule(DenseMap<MachineBasicBlock*, MachineBasicBlock*> *EM) {
+MachineBasicBlock *ScheduleDAGInstrs::EmitSchedule() {
// For MachineInstr-based scheduling, we're rescheduling the instructions in
// the block, so start by removing them from the block.
while (Begin != InsertPos) {
BB->remove(I);
}
+ // First reinsert any remaining debug_values; these are either constants,
+ // or refer to live-in registers. The beginning of the block is the right
+ // place for the latter. The former might reasonably be placed elsewhere
+ // using some kind of ordering algorithm, but right now it doesn't matter.
+ for (int i = DbgValueVec.size()-1; i >= 0; --i)
+ if (DbgValueVec[i])
+ BB->insert(InsertPos, DbgValueVec[i]);
+
// Then re-insert them according to the given schedule.
for (unsigned i = 0, e = Sequence.size(); i != e; i++) {
SUnit *SU = Sequence[i];
}
BB->insert(InsertPos, SU->getInstr());
+ for (unsigned i = 0, e = SU->DbgInstrList.size() ; i < e ; ++i)
+ BB->insert(InsertPos, SU->DbgInstrList[i]);
}
// Update the Begin iterator, as the first instruction in the block
// may have been scheduled later.
- if (!Sequence.empty())
+ if (!DbgValueVec.empty()) {
+ for (int i = DbgValueVec.size()-1; i >= 0; --i)
+ if (DbgValueVec[i] != 0) {
+ Begin = DbgValueVec[i];
+ break;
+ }
+ } else if (!Sequence.empty())
Begin = Sequence[0]->getInstr();
+ DbgValueVec.clear();
return BB;
}