// Get the Idx of the defining instructions.
SlotIndex defIndex = MIIdx.getRegSlot(MO.isEarlyClobber());
- // Make sure the first definition is not a partial redefinition. Add an
- // <imp-def> of the full register.
- // FIXME: LiveIntervals shouldn't modify the code like this. Whoever
- // created the machine instruction should annotate it with <undef> flags
- // as needed. Then we can simply assert here. The REG_SEQUENCE lowering
- // is the main suspect.
- if (MO.getSubReg()) {
- mi->addRegisterDefined(interval.reg);
- // Mark all defs of interval.reg on this instruction as reading <undef>.
- for (unsigned i = MOIdx, e = mi->getNumOperands(); i != e; ++i) {
- MachineOperand &MO2 = mi->getOperand(i);
- if (MO2.isReg() && MO2.getReg() == interval.reg && MO2.getSubReg())
- MO2.setIsUndef();
- }
- }
+ // Make sure the first definition is not a partial redefinition.
+ assert(!MO.readsReg() && "First def cannot also read virtual register "
+ "missing <undef> flag?");
VNInfo *ValNo = interval.getNextValue(defIndex, VNInfoAllocator);
assert(ValNo->id == 0 && "First value in interval is not 0?");
// IntervalUpdate class.
//===----------------------------------------------------------------------===//
-/// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
+// HMEditor is a toolkit used by handleMove to trim or extend live intervals.
class LiveIntervals::HMEditor {
private:
- LiveIntervals& lis;
- SlotIndex newIdx;
+ LiveIntervals& LIS;
+ const MachineRegisterInfo& MRI;
+ const TargetRegisterInfo& TRI;
+ SlotIndex NewIdx;
+
+ typedef std::pair<LiveInterval*, LiveRange*> IntRangePair;
+ typedef DenseSet<IntRangePair> RangeSet;
+
+ struct RegRanges {
+ LiveRange* Use;
+ LiveRange* EC;
+ LiveRange* Dead;
+ LiveRange* Def;
+ RegRanges() : Use(0), EC(0), Dead(0), Def(0) {}
+ };
+ typedef DenseMap<unsigned, RegRanges> BundleRanges;
public:
- HMEditor(LiveIntervals& lis, SlotIndex newIdx)
- : lis(lis), newIdx(newIdx) {}
-
- // Update lr to be defined at newIdx. Preserves lr.
- void moveDef(LiveRange& lr, LiveInterval& li) {
- lr.start = newIdx.getRegSlot();
- lr.valno->def = newIdx.getRegSlot();
- assert(intervalRangesSane(li) && "Broke live interval moving def.");
- }
+ HMEditor(LiveIntervals& LIS, const MachineRegisterInfo& MRI,
+ const TargetRegisterInfo& TRI, SlotIndex NewIdx)
+ : LIS(LIS), MRI(MRI), TRI(TRI), NewIdx(NewIdx) {}
- // Removes lr from li, inserting a new dead-def range starting at newIdx.
- void moveDeadDefOrEC(LiveRange& lr, LiveInterval& li, bool isEC) {
- LiveRange t(lr);
- t.start = newIdx.getRegSlot(isEC);
- t.valno->def = newIdx.getRegSlot(isEC);
- t.end = isEC ? newIdx.getRegSlot() : newIdx.getDeadSlot();
- li.removeRange(lr);
- li.addRange(t);
- assert(intervalRangesSane(li) && "Broke live interval moving dead def.");
- }
+ // Update intervals for all operands of MI from OldIdx to NewIdx.
+ // This assumes that MI used to be at OldIdx, and now resides at
+ // NewIdx.
+ void moveAllRangesFrom(MachineInstr* MI, SlotIndex OldIdx) {
+ assert(NewIdx != OldIdx && "No-op move? That's a bit strange.");
- void moveUseDown(SlotIndex oldIdx, LiveRange& lr, LiveInterval& li,
- const MachineBasicBlock* mbb) {
- bool liveThrough = lr.end > oldIdx.getRegSlot();
- if (!liveThrough) {
- // Easy fix - just update the range endpoint.
- lr.end = newIdx.getRegSlot();
- } else {
- bool liveOut = lr.end >= lis.getSlotIndexes()->getMBBEndIdx(mbb);
- if (!liveOut) {
- moveKillFlags(li.reg, lr.end, newIdx);
- lr.end = newIdx.getRegSlot();
- }
+ // Collect the operands.
+ RangeSet Entering, Internal, Exiting;
+ bool hasRegMaskOp = false;
+ collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
+
+ // To keep the LiveRanges valid within an interval, move the ranges closest
+  // to the destination first. This prevents ranges from overlapping, so that
+ // APIs like removeRange still work.
+ if (NewIdx < OldIdx) {
+ moveAllEnteringFrom(OldIdx, Entering);
+ moveAllInternalFrom(OldIdx, Internal);
+ moveAllExitingFrom(OldIdx, Exiting);
}
- assert(intervalRangesSane(li) && "Broke live interval moving use.");
+ else {
+ moveAllExitingFrom(OldIdx, Exiting);
+ moveAllInternalFrom(OldIdx, Internal);
+ moveAllEnteringFrom(OldIdx, Entering);
+ }
+
+ if (hasRegMaskOp)
+ updateRegMaskSlots(OldIdx);
+
+#ifndef NDEBUG
+ LIValidator validator;
+ validator = std::for_each(Entering.begin(), Entering.end(), validator);
+ validator = std::for_each(Internal.begin(), Internal.end(), validator);
+ validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
+ assert(validator.rangesOk() && "moveAllOperandsFrom broke liveness.");
+#endif
+
}
- // Returns the new
- void moveUseUp(SlotIndex oldIdx, LiveRange& lr, LiveInterval& li) {
- bool liveThrough = lr.end > oldIdx.getRegSlot();
- if (liveThrough)
- return; // If we moving up and live through there's nothing to do.
- SlotIndex lastUseInRange = newIdx.getRegSlot();
- for (MachineRegisterInfo::use_nodbg_iterator
- useI = lis.mri_->use_nodbg_begin(li.reg),
- useE = lis.mri_->use_nodbg_end();
- useI != useE; ++useI) {
- const MachineInstr* mopI = &*useI;
- const MachineOperand& mop = useI.getOperand();
- SlotIndex instSlot = lis.getSlotIndexes()->getInstructionIndex(mopI);
- SlotIndex opSlot = instSlot.getRegSlot(mop.isEarlyClobber());
- if (opSlot > lastUseInRange && opSlot < oldIdx)
- lastUseInRange = opSlot;
+ // Update intervals for all operands of MI to refer to BundleStart's
+ // SlotIndex.
+ void moveAllRangesInto(MachineInstr* MI, MachineInstr* BundleStart) {
+ if (MI == BundleStart)
+ return; // Bundling instr with itself - nothing to do.
+
+ SlotIndex OldIdx = LIS.getSlotIndexes()->getInstructionIndex(MI);
+ assert(LIS.getSlotIndexes()->getInstructionFromIndex(OldIdx) == MI &&
+ "SlotIndex <-> Instruction mapping broken for MI");
+
+ // Collect all ranges already in the bundle.
+ MachineBasicBlock::instr_iterator BII(BundleStart);
+ RangeSet Entering, Internal, Exiting;
+ bool hasRegMaskOp = false;
+ collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
+ assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
+ for (++BII; &*BII == MI || BII->isInsideBundle(); ++BII) {
+ if (&*BII == MI)
+ continue;
+ collectRanges(BII, Entering, Internal, Exiting, hasRegMaskOp, NewIdx);
+ assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
}
- // If we found a new instr endpoint update the kill flags.
- if (lastUseInRange != newIdx.getRegSlot())
- moveKillFlags(li.reg, newIdx, lastUseInRange);
+ BundleRanges BR = createBundleRanges(Entering, Internal, Exiting);
+
+ collectRanges(MI, Entering, Internal, Exiting, hasRegMaskOp, OldIdx);
+ assert(!hasRegMaskOp && "Can't have RegMask operand in bundle.");
- // Fix up the range end.
- lr.end = lastUseInRange;
+ DEBUG(dbgs() << "Entering: " << Entering.size() << "\n");
+ DEBUG(dbgs() << "Internal: " << Internal.size() << "\n");
+ DEBUG(dbgs() << "Exiting: " << Exiting.size() << "\n");
- assert(intervalRangesSane(li) && "Broke live interval moving use.");
+ moveAllEnteringFromInto(OldIdx, Entering, BR);
+ moveAllInternalFromInto(OldIdx, Internal, BR);
+ moveAllExitingFromInto(OldIdx, Exiting, BR);
+
+
+#ifndef NDEBUG
+ LIValidator validator;
+ validator = std::for_each(Entering.begin(), Entering.end(), validator);
+ validator = std::for_each(Internal.begin(), Internal.end(), validator);
+ validator = std::for_each(Exiting.begin(), Exiting.end(), validator);
+ assert(validator.rangesOk() && "moveAllOperandsInto broke liveness.");
+#endif
}
- // Update intervals for all operands of mi from oldIndex to newIndex.
- void moveAllOperands(MachineInstr* mi, SlotIndex oldIdx) {
- // Figure out the direction we're moving.
- bool movingUp = newIdx < oldIdx;
+private:
- // Collect the operands.
- DenseSet<unsigned> uses, defs, deadDefs, ecs;
- for (MachineInstr::mop_iterator mopItr = mi->operands_begin(),
- mopEnd = mi->operands_end();
- mopItr != mopEnd; ++mopItr) {
- const MachineOperand& mop = *mopItr;
+#ifndef NDEBUG
+ class LIValidator {
+ private:
+ DenseSet<const LiveInterval*> Checked, Bogus;
+ public:
+ void operator()(const IntRangePair& P) {
+ const LiveInterval* LI = P.first;
+ if (Checked.count(LI))
+ return;
+ Checked.insert(LI);
+ if (LI->empty())
+ return;
+ SlotIndex LastEnd = LI->begin()->start;
+ for (LiveInterval::const_iterator LRI = LI->begin(), LRE = LI->end();
+ LRI != LRE; ++LRI) {
+ const LiveRange& LR = *LRI;
+ if (LastEnd > LR.start || LR.start >= LR.end)
+ Bogus.insert(LI);
+ LastEnd = LR.end;
+ }
+ }
- if (!mop.isReg() || mop.getReg() == 0)
+ bool rangesOk() const {
+ return Bogus.empty();
+ }
+ };
+#endif
+
+ // Collect IntRangePairs for all operands of MI that may need fixing.
+  // Treats MI's index as OldIdx (regardless of what it is in SlotIndexes'
+ // maps).
+ void collectRanges(MachineInstr* MI, RangeSet& Entering, RangeSet& Internal,
+ RangeSet& Exiting, bool& hasRegMaskOp, SlotIndex OldIdx) {
+ hasRegMaskOp = false;
+ for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end();
+ MOI != MOE; ++MOI) {
+ const MachineOperand& MO = *MOI;
+
+ if (MO.isRegMask()) {
+ hasRegMaskOp = true;
continue;
+ }
- unsigned reg = mop.getReg();
+ if (!MO.isReg() || MO.getReg() == 0)
+ continue;
+
+ unsigned Reg = MO.getReg();
// TODO: Currently we're skipping uses that are reserved or have no
// interval, but we're not updating their kills. This should be
// fixed.
- if (!lis.hasInterval(reg) ||
- (TargetRegisterInfo::isPhysicalRegister(reg) && lis.isReserved(reg)))
+ if (!LIS.hasInterval(Reg) ||
+ (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
continue;
- if (mop.readsReg() && !ecs.count(reg)) {
- uses.insert(reg);
+ LiveInterval* LI = &LIS.getInterval(Reg);
+
+ if (MO.readsReg()) {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx);
+ if (LR != 0)
+ Entering.insert(std::make_pair(LI, LR));
}
- if (mop.isDef()) {
- if (mop.isDead()) {
- assert(!defs.count(reg) && "Can't mix defs with dead-defs.");
- deadDefs.insert(reg);
- } else if (mop.isEarlyClobber()) {
- uses.erase(reg);
- ecs.insert(reg);
+ if (MO.isDef()) {
+ if (MO.isEarlyClobber()) {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot(true));
+ assert(LR != 0 && "No EC range?");
+ if (LR->end > OldIdx.getDeadSlot())
+ Exiting.insert(std::make_pair(LI, LR));
+ else
+ Internal.insert(std::make_pair(LI, LR));
+ } else if (MO.isDead()) {
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getRegSlot());
+ assert(LR != 0 && "No dead-def range?");
+ Internal.insert(std::make_pair(LI, LR));
} else {
- assert(!deadDefs.count(reg) && "Can't mix defs with dead-defs.");
- defs.insert(reg);
+ LiveRange* LR = LI->getLiveRangeContaining(OldIdx.getDeadSlot());
+ assert(LR && LR->end > OldIdx.getDeadSlot() &&
+ "Non-dead-def should have live range exiting.");
+ Exiting.insert(std::make_pair(LI, LR));
}
}
}
+ }
- if (movingUp) {
- moveUsesUp(oldIdx, uses);
- moveECs(oldIdx, ecs);
- moveDeadDefs(oldIdx, deadDefs);
- moveDefs(oldIdx, defs);
- } else {
- moveDefs(oldIdx, defs);
- moveDeadDefs(oldIdx, deadDefs);
- moveECs(oldIdx, ecs);
- moveUsesDown(oldIdx, uses, mi->getParent());
+ // Collect IntRangePairs for all operands of MI that may need fixing.
+ void collectRangesInBundle(MachineInstr* MI, RangeSet& Entering,
+ RangeSet& Exiting, SlotIndex MIStartIdx,
+ SlotIndex MIEndIdx) {
+ for (MachineInstr::mop_iterator MOI = MI->operands_begin(),
+ MOE = MI->operands_end();
+ MOI != MOE; ++MOI) {
+ const MachineOperand& MO = *MOI;
+ assert(!MO.isRegMask() && "Can't have RegMasks in bundles.");
+ if (!MO.isReg() || MO.getReg() == 0)
+ continue;
+
+ unsigned Reg = MO.getReg();
+
+ // TODO: Currently we're skipping uses that are reserved or have no
+ // interval, but we're not updating their kills. This should be
+ // fixed.
+ if (!LIS.hasInterval(Reg) ||
+ (TargetRegisterInfo::isPhysicalRegister(Reg) && LIS.isReserved(Reg)))
+ continue;
+
+ LiveInterval* LI = &LIS.getInterval(Reg);
+
+ if (MO.readsReg()) {
+ LiveRange* LR = LI->getLiveRangeContaining(MIStartIdx);
+ if (LR != 0)
+ Entering.insert(std::make_pair(LI, LR));
+ }
+ if (MO.isDef()) {
+ assert(!MO.isEarlyClobber() && "Early clobbers not allowed in bundles.");
+ assert(!MO.isDead() && "Dead-defs not allowed in bundles.");
+ LiveRange* LR = LI->getLiveRangeContaining(MIEndIdx.getDeadSlot());
+ assert(LR != 0 && "Internal ranges not allowed in bundles.");
+ Exiting.insert(std::make_pair(LI, LR));
+ }
}
}
-private:
+ BundleRanges createBundleRanges(RangeSet& Entering, RangeSet& Internal, RangeSet& Exiting) {
+ BundleRanges BR;
-#ifndef NDEBUG
- bool intervalRangesSane(const LiveInterval& li) {
- if (li.empty()) {
- return true;
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI) {
+ LiveInterval* LI = EI->first;
+ LiveRange* LR = EI->second;
+ BR[LI->reg].Use = LR;
}
- SlotIndex lastEnd = li.begin()->start;
- for (LiveInterval::const_iterator lrItr = li.begin(), lrEnd = li.end();
- lrItr != lrEnd; ++lrItr) {
- const LiveRange& lr = *lrItr;
- if (lastEnd > lr.start || lr.start >= lr.end)
- return false;
- lastEnd = lr.end;
+ for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+ II != IE; ++II) {
+ LiveInterval* LI = II->first;
+ LiveRange* LR = II->second;
+ if (LR->end.isDead()) {
+ BR[LI->reg].Dead = LR;
+ } else {
+ BR[LI->reg].EC = LR;
+ }
+ }
+
+ for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+ EI != EE; ++EI) {
+ LiveInterval* LI = EI->first;
+ LiveRange* LR = EI->second;
+ BR[LI->reg].Def = LR;
}
- return true;
+ return BR;
}
-#endif
- void moveKillFlags(unsigned reg, SlotIndex oldIdx, SlotIndex newKillIdx) {
- MachineInstr* oldKillMI = lis.getInstructionFromIndex(oldIdx);
- if (!oldKillMI->killsRegister(reg))
+ void moveKillFlags(unsigned reg, SlotIndex OldIdx, SlotIndex newKillIdx) {
+ MachineInstr* OldKillMI = LIS.getInstructionFromIndex(OldIdx);
+ if (!OldKillMI->killsRegister(reg))
return; // Bail out if we don't have kill flags on the old register.
- MachineInstr* newKillMI = lis.getInstructionFromIndex(newKillIdx);
- assert(oldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
- assert(!newKillMI->killsRegister(reg) && "New kill instr is already a kill.");
- oldKillMI->clearRegisterKills(reg, lis.tri_);
- newKillMI->addRegisterKilled(reg, lis.tri_);
+ MachineInstr* NewKillMI = LIS.getInstructionFromIndex(newKillIdx);
+ assert(OldKillMI->killsRegister(reg) && "Old 'kill' instr isn't a kill.");
+ assert(!NewKillMI->killsRegister(reg) && "New kill instr is already a kill.");
+ OldKillMI->clearRegisterKills(reg, &TRI);
+ NewKillMI->addRegisterKilled(reg, &TRI);
+ }
+
+ void updateRegMaskSlots(SlotIndex OldIdx) {
+ SmallVectorImpl<SlotIndex>::iterator RI =
+ std::lower_bound(LIS.RegMaskSlots.begin(), LIS.RegMaskSlots.end(),
+ OldIdx);
+ assert(*RI == OldIdx && "No RegMask at OldIdx.");
+ *RI = NewIdx;
+ assert(*prior(RI) < *RI && *RI < *next(RI) &&
+ "RegSlots out of order. Did you move one call across another?");
}
- template <typename DefSetT>
- void moveDefs(SlotIndex oldIdx, const DefSetT& defs) {
- typedef typename DefSetT::const_iterator DefItr;
- for (DefItr di = defs.begin(), de = defs.end(); di != de; ++di) {
- unsigned def = *di;
- LiveInterval& li = lis.getInterval(def);
- LiveRange* lr = li.getLiveRangeContaining(oldIdx.getRegSlot());
- assert(lr != 0 && "No range?");
- moveDef(*lr, li);
+ // Return the last use of reg between NewIdx and OldIdx.
+ SlotIndex findLastUseBefore(unsigned Reg, SlotIndex OldIdx) {
+ SlotIndex LastUse = NewIdx;
+ for (MachineRegisterInfo::use_nodbg_iterator
+ UI = MRI.use_nodbg_begin(Reg),
+ UE = MRI.use_nodbg_end();
+ UI != UE; UI.skipInstruction()) {
+ const MachineInstr* MI = &*UI;
+ SlotIndex InstSlot = LIS.getSlotIndexes()->getInstructionIndex(MI);
+ if (InstSlot > LastUse && InstSlot < OldIdx)
+ LastUse = InstSlot;
}
+ return LastUse;
+ }
+
+ void moveEnteringUpFrom(SlotIndex OldIdx, IntRangePair& P) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ bool LiveThrough = LR->end > OldIdx.getRegSlot();
+ if (LiveThrough)
+ return;
+ SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
+ if (LastUse != NewIdx)
+ moveKillFlags(LI->reg, NewIdx, LastUse);
+ LR->end = LastUse.getRegSlot();
}
- template <typename DeadDefSetT>
- void moveDeadDefs(SlotIndex oldIdx, const DeadDefSetT& deadDefs) {
- typedef typename DeadDefSetT::const_iterator DeadDefItr;
- for (DeadDefItr di = deadDefs.begin(),de = deadDefs.end(); di != de; ++di) {
- unsigned deadDef = *di;
- LiveInterval& li = lis.getInterval(deadDef);
- LiveRange* lr = li.getLiveRangeContaining(oldIdx.getRegSlot());
- assert(lr != 0 && "No range for dead def?");
- assert(lr->start == oldIdx.getRegSlot() && "Bad dead range start?");
- assert(lr->end == oldIdx.getDeadSlot() && "Bad dead range end?");
- assert(lr->valno->def == oldIdx.getRegSlot() && "Bad dead valno def.");
- moveDeadDefOrEC(*lr, li, false);
+ void moveEnteringDownFrom(SlotIndex OldIdx, IntRangePair& P) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ // Extend the LiveRange if NewIdx is past the end.
+ if (NewIdx > LR->end) {
+ // Move kill flags if OldIdx was not originally the end
+ // (otherwise LR->end points to an invalid slot).
+ if (LR->end.getRegSlot() != OldIdx.getRegSlot()) {
+ assert(LR->end > OldIdx && "LiveRange does not cover original slot");
+ moveKillFlags(LI->reg, LR->end, NewIdx);
+ }
+ LR->end = NewIdx.getRegSlot();
}
}
- template <typename ECSetT>
- void moveECs(SlotIndex oldIdx, const ECSetT& ecs) {
- typedef typename ECSetT::const_iterator ECItr;
- for (ECItr eci = ecs.begin(), ece = ecs.end(); eci != ece; ++eci) {
- unsigned ec = *eci;
- LiveInterval& li = lis.getInterval(ec);
- LiveRange* lr = li.getLiveRangeContaining(oldIdx.getRegSlot(true));
- assert(lr != 0 && "No range for early clobber?");
- assert(lr->start == oldIdx.getRegSlot(true) && "Bad EC range start?");
- assert(lr->end == oldIdx.getRegSlot() && "Bad EC range end.");
- assert(lr->valno->def == oldIdx.getRegSlot(true) && "Bad EC valno def.");
- moveDeadDefOrEC(*lr, li, true);
+ void moveAllEnteringFrom(SlotIndex OldIdx, RangeSet& Entering) {
+ bool GoingUp = NewIdx < OldIdx;
+
+ if (GoingUp) {
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI)
+ moveEnteringUpFrom(OldIdx, *EI);
+ } else {
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI)
+ moveEnteringDownFrom(OldIdx, *EI);
}
}
- template <typename UseSetT>
- void moveUsesUp(SlotIndex oldIdx, const UseSetT &uses) {
- typedef typename UseSetT::const_iterator UseItr;
- for (UseItr ui = uses.begin(), ue = uses.end(); ui != ue; ++ui) {
- unsigned use = *ui;
- LiveInterval& li = lis.getInterval(use);
- LiveRange* lr = li.getLiveRangeBefore(oldIdx.getRegSlot());
- assert(lr != 0 && "No range for use?");
- moveUseUp(oldIdx, *lr, li);
+ void moveInternalFrom(SlotIndex OldIdx, IntRangePair& P) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
+ LR->end <= OldIdx.getDeadSlot() &&
+ "Range should be internal to OldIdx.");
+ LiveRange Tmp(*LR);
+ Tmp.start = NewIdx.getRegSlot(LR->start.isEarlyClobber());
+ Tmp.valno->def = Tmp.start;
+ Tmp.end = LR->end.isDead() ? NewIdx.getDeadSlot() : NewIdx.getRegSlot();
+ LI->removeRange(*LR);
+ LI->addRange(Tmp);
+ }
+
+ void moveAllInternalFrom(SlotIndex OldIdx, RangeSet& Internal) {
+ for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+ II != IE; ++II)
+ moveInternalFrom(OldIdx, *II);
+ }
+
+ void moveExitingFrom(SlotIndex OldIdx, IntRangePair& P) {
+ LiveRange* LR = P.second;
+ assert(OldIdx < LR->start && LR->start < OldIdx.getDeadSlot() &&
+ "Range should start in OldIdx.");
+ assert(LR->end > OldIdx.getDeadSlot() && "Range should exit OldIdx.");
+ SlotIndex NewStart = NewIdx.getRegSlot(LR->start.isEarlyClobber());
+ LR->start = NewStart;
+ LR->valno->def = NewStart;
+ }
+
+ void moveAllExitingFrom(SlotIndex OldIdx, RangeSet& Exiting) {
+ for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+ EI != EE; ++EI)
+ moveExitingFrom(OldIdx, *EI);
+ }
+
+ void moveEnteringUpFromInto(SlotIndex OldIdx, IntRangePair& P,
+ BundleRanges& BR) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ bool LiveThrough = LR->end > OldIdx.getRegSlot();
+ if (LiveThrough) {
+ assert((LR->start < NewIdx || BR[LI->reg].Def == LR) &&
+ "Def in bundle should be def range.");
+ assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
+ "If bundle has use for this reg it should be LR.");
+ BR[LI->reg].Use = LR;
+ return;
}
+
+ SlotIndex LastUse = findLastUseBefore(LI->reg, OldIdx);
+ moveKillFlags(LI->reg, OldIdx, LastUse);
+
+ if (LR->start < NewIdx) {
+ // Becoming a new entering range.
+ assert(BR[LI->reg].Dead == 0 && BR[LI->reg].Def == 0 &&
+ "Bundle shouldn't be re-defining reg mid-range.");
+ assert((BR[LI->reg].Use == 0 || BR[LI->reg].Use == LR) &&
+ "Bundle shouldn't have different use range for same reg.");
+ LR->end = LastUse.getRegSlot();
+ BR[LI->reg].Use = LR;
+ } else {
+ // Becoming a new Dead-def.
+ assert(LR->start == NewIdx.getRegSlot(LR->start.isEarlyClobber()) &&
+ "Live range starting at unexpected slot.");
+ assert(BR[LI->reg].Def == LR && "Reg should have def range.");
+ assert(BR[LI->reg].Dead == 0 &&
+ "Can't have def and dead def of same reg in a bundle.");
+ LR->end = LastUse.getDeadSlot();
+ BR[LI->reg].Dead = BR[LI->reg].Def;
+ BR[LI->reg].Def = 0;
+ }
+ }
+
+ void moveEnteringDownFromInto(SlotIndex OldIdx, IntRangePair& P,
+ BundleRanges& BR) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+ if (NewIdx > LR->end) {
+ // Range extended to bundle. Add to bundle uses.
+ // Note: Currently adds kill flags to bundle start.
+ assert(BR[LI->reg].Use == 0 &&
+ "Bundle already has use range for reg.");
+ moveKillFlags(LI->reg, LR->end, NewIdx);
+ LR->end = NewIdx.getRegSlot();
+ BR[LI->reg].Use = LR;
+ } else {
+ assert(BR[LI->reg].Use != 0 &&
+ "Bundle should already have a use range for reg.");
+ }
+ }
+
+ void moveAllEnteringFromInto(SlotIndex OldIdx, RangeSet& Entering,
+ BundleRanges& BR) {
+ bool GoingUp = NewIdx < OldIdx;
+
+ if (GoingUp) {
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI)
+ moveEnteringUpFromInto(OldIdx, *EI, BR);
+ } else {
+ for (RangeSet::iterator EI = Entering.begin(), EE = Entering.end();
+ EI != EE; ++EI)
+ moveEnteringDownFromInto(OldIdx, *EI, BR);
+ }
+ }
+
+ void moveInternalFromInto(SlotIndex OldIdx, IntRangePair& P,
+ BundleRanges& BR) {
+ // TODO: Sane rules for moving ranges into bundles.
}
- template <typename UseSetT>
- void moveUsesDown(SlotIndex oldIdx, const UseSetT &uses,
- const MachineBasicBlock* mbb) {
- typedef typename UseSetT::const_iterator UseItr;
- for (UseItr ui = uses.begin(), ue = uses.end(); ui != ue; ++ui) {
- unsigned use = *ui;
- LiveInterval& li = lis.getInterval(use);
- LiveRange* lr = li.getLiveRangeBefore(oldIdx.getRegSlot());
- assert(lr != 0 && "No range for use?");
- moveUseDown(oldIdx, *lr, li, mbb);
+ void moveAllInternalFromInto(SlotIndex OldIdx, RangeSet& Internal,
+ BundleRanges& BR) {
+ for (RangeSet::iterator II = Internal.begin(), IE = Internal.end();
+ II != IE; ++II)
+ moveInternalFromInto(OldIdx, *II, BR);
+ }
+
+ void moveExitingFromInto(SlotIndex OldIdx, IntRangePair& P,
+ BundleRanges& BR) {
+ LiveInterval* LI = P.first;
+ LiveRange* LR = P.second;
+
+ assert(LR->start.isRegister() &&
+ "Don't know how to merge exiting ECs into bundles yet.");
+
+ if (LR->end > NewIdx.getDeadSlot()) {
+ // This range is becoming an exiting range on the bundle.
+ // If there was an old dead-def of this reg, delete it.
+ if (BR[LI->reg].Dead != 0) {
+ LI->removeRange(*BR[LI->reg].Dead);
+ BR[LI->reg].Dead = 0;
+ }
+ assert(BR[LI->reg].Def == 0 &&
+ "Can't have two defs for the same variable exiting a bundle.");
+ LR->start = NewIdx.getRegSlot();
+ LR->valno->def = LR->start;
+ BR[LI->reg].Def = LR;
+ } else {
+ // This range is becoming internal to the bundle.
+ assert(LR->end == NewIdx.getRegSlot() &&
+ "Can't bundle def whose kill is before the bundle");
+ if (BR[LI->reg].Dead || BR[LI->reg].Def) {
+ // Already have a def for this. Just delete range.
+ LI->removeRange(*LR);
+ } else {
+ // Make range dead, record.
+ LR->end = NewIdx.getDeadSlot();
+ BR[LI->reg].Dead = LR;
+ assert(BR[LI->reg].Use == LR &&
+ "Range becoming dead should currently be use.");
+ }
+ // In both cases the range is no longer a use on the bundle.
+ BR[LI->reg].Use = 0;
}
}
+
+ void moveAllExitingFromInto(SlotIndex OldIdx, RangeSet& Exiting,
+ BundleRanges& BR) {
+ for (RangeSet::iterator EI = Exiting.begin(), EE = Exiting.end();
+ EI != EE; ++EI)
+ moveExitingFromInto(OldIdx, *EI, BR);
+ }
+
};
-void LiveIntervals::handleMove(MachineInstr* mi) {
- SlotIndex oldIndex = indexes_->getInstructionIndex(mi);
- indexes_->removeMachineInstrFromMaps(mi);
- SlotIndex newIndex = mi->isInsideBundle() ?
- indexes_->getInstructionIndex(mi->getBundleStart()) :
- indexes_->insertMachineInstrInMaps(mi);
- MachineBasicBlock* mbb = mi->getParent();
- assert(getMBBStartIdx(mbb) <= oldIndex && oldIndex < getMBBEndIdx(mbb) &&
+void LiveIntervals::handleMove(MachineInstr* MI) {
+ SlotIndex OldIndex = indexes_->getInstructionIndex(MI);
+ indexes_->removeMachineInstrFromMaps(MI);
+ SlotIndex NewIndex = MI->isInsideBundle() ?
+ indexes_->getInstructionIndex(MI) :
+ indexes_->insertMachineInstrInMaps(MI);
+ assert(getMBBStartIdx(MI->getParent()) <= OldIndex &&
+ OldIndex < getMBBEndIdx(MI->getParent()) &&
"Cannot handle moves across basic block boundaries.");
- assert(!mi->isBundled() && "Can't handle bundled instructions yet.");
+ assert(!MI->isBundled() && "Can't handle bundled instructions yet.");
+
+ HMEditor HME(*this, *mri_, *tri_, NewIndex);
+ HME.moveAllRangesFrom(MI, OldIndex);
+}
- HMEditor hme(*this, newIndex);
- hme.moveAllOperands(mi, oldIndex);
+void LiveIntervals::handleMoveIntoBundle(MachineInstr* MI, MachineInstr* BundleStart) {
+ SlotIndex NewIndex = indexes_->getInstructionIndex(BundleStart);
+ HMEditor HME(*this, *mri_, *tri_, NewIndex);
+ HME.moveAllRangesInto(MI, BundleStart);
}