-
- LHSValNoAssignments.resize(LHS.getNumValNums(), -1);
- RHSValNoAssignments.resize(RHS.getNumValNums(), -1);
- ValueNumberInfo.resize(LHS.getNumValNums());
-
- // Okay, *all* of the values in LHS that are defined as a copy from RHS
- // should now get updated.
- for (unsigned VN = 0, e = LHS.getNumValNums(); VN != e; ++VN) {
- if (unsigned LHSSrcReg = LHS.getSrcRegForValNum(VN)) {
- if (rep(LHSSrcReg) != RHS.reg) {
- // If this is not a copy from the RHS, its value number will be
- // unmodified by the coallescing.
- ValueNumberInfo[VN] = LHS.getValNumInfo(VN);
- LHSValNoAssignments[VN] = VN;
- } else if (RHSValID == -1) {
- // Otherwise, it is a copy from the RHS, and we don't already have a
- // value# for it. Keep the current value number, but remember it.
- LHSValNoAssignments[VN] = RHSValID = VN;
- ValueNumberInfo[VN] = RHSValNoInfo;
- } else {
- // Otherwise, use the specified value #.
- LHSValNoAssignments[VN] = RHSValID;
- if (VN != (unsigned)RHSValID)
- ValueNumberInfo[VN].first = ~1U;
- else
- ValueNumberInfo[VN] = RHSValNoInfo;
+ if (HasDef) {
+ LiveRange LR(getDefIndex(index), getStoreIndex(index),
+ nI.getNextValue(~0U, 0, VNInfoAllocator));
+ DOUT << " +" << LR;
+ nI.addRange(LR);
+ }
+
+ DOUT << "\t\t\t\tAdded new interval: ";
+ nI.print(DOUT, tri_);
+ DOUT << '\n';
+ }
+ return CanFold;
+}
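+
+/// anyKillInMBBAfterIdx - Return true if the given value number has a kill
+/// that falls after index Idx but still inside basic block MBB.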
+bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
+ const VNInfo *VNI,
+ MachineBasicBlock *MBB, unsigned Idx) const {
+ unsigned End = getMBBEndIdx(MBB);
+ for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
+ unsigned KillIdx = VNI->kills[j];
+ if (KillIdx > Idx && KillIdx < End)
+ return true;
+ }
+ return false;
+}
+
+/// RewriteInfo - Keep track of machine instrs that will be rewritten
+/// during spilling.
+namespace {
+ struct RewriteInfo {
+ unsigned Index;
+ MachineInstr *MI;
+ bool HasUse;
+ bool HasDef;
+ RewriteInfo(unsigned i, MachineInstr *mi, bool u, bool d)
+ : Index(i), MI(mi), HasUse(u), HasDef(d) {}
+ };
+
+ struct RewriteInfoCompare {
+ bool operator()(const RewriteInfo &LHS, const RewriteInfo &RHS) const {
+ return LHS.Index < RHS.Index;
+ }
+ };
+}
+
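+/// rewriteInstructionsForSpills - Rewrite every def and use of li.reg inside
+/// the live range [I->start, I->end), assigning new virtual registers and
+/// recording the spill and restore points each basic block requires.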
+void LiveIntervals::
+rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
+ LiveInterval::Ranges::const_iterator &I,
+ MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
+ unsigned Slot, int LdSlot,
+ bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
+ VirtRegMap &vrm,
+ const TargetRegisterClass* rc,
+ SmallVector<int, 4> &ReMatIds,
+ const MachineLoopInfo *loopInfo,
+ BitVector &SpillMBBs,
+ std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
+ BitVector &RestoreMBBs,
+ std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
+ std::map<unsigned,unsigned> &MBBVRegsMap,
+ std::vector<LiveInterval*> &NewLIs, float &SSWeight) {
+ bool AllCanFold = true;
+ unsigned NewVReg = 0;
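+ // Round the range out to whole instruction indices so that every operand
+ // of every instruction overlapping [I->start, I->end) is visited.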
+ unsigned start = getBaseIndex(I->start);
+ unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
+
+ // First collect all the defs / uses in this live range that will be
+ // rewritten, and make sure they are sorted by instruction index.
+ std::vector<RewriteInfo> RewriteMIs;
+ for (MachineRegisterInfo::reg_iterator ri = mri_->reg_begin(li.reg),
+ re = mri_->reg_end(); ri != re; ) {
+ MachineInstr *MI = &*ri;
+ MachineOperand &O = ri.getOperand();
+ ++ri;
+ assert(!O.isImplicit() && "Spilling a register that is used implicitly?");
+ unsigned index = getInstructionIndex(MI);
+ if (index < start || index >= end)
+ continue;
+ RewriteMIs.push_back(RewriteInfo(index, MI, O.isUse(), O.isDef()));
+ }
+ std::sort(RewriteMIs.begin(), RewriteMIs.end(), RewriteInfoCompare());
+
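+ // If the rematerializable def reads another virtual register, remember
+ // it; that register's interval is given an infinite spill weight below so
+ // it is never spilled away while the def can still be re-materialized.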
+ unsigned ImpUse = DefIsReMat ? getReMatImplicitUse(li, ReMatDefMI) : 0;
+ // Now rewrite the defs and uses.
+ for (unsigned i = 0, e = RewriteMIs.size(); i != e; ) {
+ RewriteInfo &rwi = RewriteMIs[i];
+ ++i;
+ unsigned index = rwi.Index;
+ bool MIHasUse = rwi.HasUse;
+ bool MIHasDef = rwi.HasDef;
+ MachineInstr *MI = rwi.MI;
+ // If MI defines and/or uses the same register multiple times, there
+ // are multiple entries; fold them into a single rewrite.
+ unsigned NumUses = MIHasUse;
+ while (i != e && RewriteMIs[i].MI == MI) {
+ assert(RewriteMIs[i].Index == index);
+ bool isUse = RewriteMIs[i].HasUse;
+ if (isUse) ++NumUses;
+ MIHasUse |= isUse;
+ MIHasDef |= RewriteMIs[i].HasDef;
+ ++i;
+ }
+ MachineBasicBlock *MBB = MI->getParent();
+
+ if (ImpUse && MI != ReMatDefMI) {
+ // Re-materializing an instruction that has a virtual register use.
+ // Raise that register interval's spill weight to HUGE_VALF to prevent
+ // it from being spilled.
+ LiveInterval &ImpLi = getInterval(ImpUse);
+ ImpLi.weight = HUGE_VALF;
+ }
+
+ unsigned MBBId = MBB->getNumber();
+ unsigned ThisVReg = 0;
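+ // When splitting, reuse the virtual register already assigned to this
+ // basic block, if there is one.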
+ if (TrySplit) {
+ std::map<unsigned,unsigned>::const_iterator NVI = MBBVRegsMap.find(MBBId);
+ if (NVI != MBBVRegsMap.end()) {
+ ThisVReg = NVI->second;
+ // One common case:
+ // x = use
+ // ...
+ // ...
+ // def = ...
+ // = use
+ // It's better to start a new interval to avoid artificially
+ // extending the new interval.
+ if (MIHasDef && !MIHasUse) {
+ MBBVRegsMap.erase(MBB->getNumber());
+ ThisVReg = 0;