+/// getNumberOfNonSpills - Count how many of the given instructions are NOT
+/// spills of Reg to the stack slot FrameIndex. Additionally sets FeedsTwoAddr
+/// if any instruction in the set redefines Reg through a tied (two-address)
+/// operand pair.
+unsigned PreAllocSplitting::getNumberOfNonSpills(
+ SmallPtrSet<MachineInstr*, 4>& MIs,
+ unsigned Reg, int FrameIndex,
+ bool& FeedsTwoAddr) {
+ unsigned Count = 0;
+ for (SmallPtrSet<MachineInstr*, 4>::iterator I = MIs.begin(), E = MIs.end();
+ I != E; ++I) {
+ MachineInstr* MI = *I;
+
+ // A use only counts as a spill when it stores exactly this vreg into
+ // exactly this stack slot; anything else is a "real" (non-spill) use.
+ int SSIndex;
+ if (TII->isStoreToStackSlot(MI, SSIndex) != Reg || SSIndex != FrameIndex)
+ ++Count;
+
+ // Note whether Reg is also redefined here via a tied def/use operand.
+ int DefOp = MI->findRegisterDefOperandIdx(Reg);
+ if (DefOp != -1 && MI->isRegTiedToUseOperand(DefOp))
+ FeedsTwoAddr = true;
+ }
+
+ return Count;
+}
+
+/// removeDeadSpills - After doing splitting, filter through all intervals we've
+/// split, and see if any of the spills are unnecessary. If so, remove them.
+/// Returns true if any instruction was deleted or folded away.
+bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
+ bool changed = false;
+
+ // Walk over all of the live intervals that were touched by the splitter,
+ // and see if we can do any DCE and/or folding.
+ for (SmallPtrSet<LiveInterval*, 8>::iterator LI = split.begin(),
+ LE = split.end(); LI != LE; ++LI) {
+ DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> > VNUseCount;
+
+ // First, collect all the uses of the vreg, and sort them by their
+ // reaching definition (VNInfo).
+ for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
+ UE = MRI->use_end(); UI != UE; ++UI) {
+ unsigned index = LIs->getInstructionIndex(&*UI);
+ index = LiveIntervals::getUseIndex(index);
+
+ const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
+ VNUseCount[LR->valno].insert(&*UI);
+ }
+
+ // Now, take the definitions (VNInfo's) one at a time and try to DCE
+ // and/or fold them away.
+ for (LiveInterval::vni_iterator VI = (*LI)->vni_begin(),
+ VE = (*LI)->vni_end(); VI != VE; ++VI) {
+
+ // Respect the command-line cap on how many dead spills we may remove.
+ if (DeadSplitLimit != -1 && (int)NumDeadSpills == DeadSplitLimit)
+ return changed;
+
+ VNInfo* CurrVN = *VI;
+
+ // We don't currently try to handle definitions with PHI kills, because
+ // it would involve processing more than one VNInfo at once.
+ if (CurrVN->hasPHIKill()) continue;
+
+ // We also don't try to handle the results of PHI joins, since there's
+ // no defining instruction to analyze.
+ if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue;
+
+ // We're only interested in eliminating cruft introduced by the splitter,
+ // is of the form load-use or load-use-store. First, check that the
+ // definition is a load, and remember what stack slot we loaded it from.
+ MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
+ int FrameIndex;
+ if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;
+
+ // If the definition has no uses at all, just DCE it.
+ if (VNUseCount[CurrVN].size() == 0) {
+ LIs->RemoveMachineInstrFromMaps(DefMI);
+ (*LI)->removeValNo(CurrVN);
+ DefMI->eraseFromParent();
+ VNUseCount.erase(CurrVN);
+ NumDeadSpills++;
+ changed = true;
+ continue;
+ }
+
+ // Second, get the number of non-store uses of the definition, as well as
+ // a flag indicating whether it feeds into a later two-address definition.
+ bool FeedsTwoAddr = false;
+ unsigned NonSpillCount = getNumberOfNonSpills(VNUseCount[CurrVN],
+ (*LI)->reg, FrameIndex,
+ FeedsTwoAddr);
+
+ // If there's one non-store use and it doesn't feed a two-addr, then
+ // this is a load-use-store case that we can try to fold.
+ if (NonSpillCount == 1 && !FeedsTwoAddr) {
+ // Start by finding the non-store use MachineInstr.
+ SmallPtrSet<MachineInstr*, 4>::iterator UI = VNUseCount[CurrVN].begin();
+ int StoreFrameIndex;
+ unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
+ while (UI != VNUseCount[CurrVN].end() &&
+ (StoreVReg == (*LI)->reg && StoreFrameIndex == FrameIndex)) {
+ ++UI;
+ if (UI != VNUseCount[CurrVN].end())
+ StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
+ }
+ if (UI == VNUseCount[CurrVN].end()) continue;
+
+ MachineInstr* use = *UI;
+
+ // Attempt to fold it away!
+ int OpIdx = use->findRegisterUseOperandIdx((*LI)->reg, false);
+ if (OpIdx == -1) continue;
+ SmallVector<unsigned, 1> Ops;
+ Ops.push_back(OpIdx);
+ if (!TII->canFoldMemoryOperand(use, Ops)) continue;
+
+ MachineInstr* NewMI =
+ TII->foldMemoryOperand(*use->getParent()->getParent(),
+ use, Ops, FrameIndex);
+
+ if (!NewMI) continue;
+
+ // Update relevant analyses.
+ LIs->RemoveMachineInstrFromMaps(DefMI);
+ LIs->ReplaceMachineInstrInMaps(use, NewMI);
+ (*LI)->removeValNo(CurrVN);
+
+ DefMI->eraseFromParent();
+ MachineBasicBlock* MBB = use->getParent();
+ NewMI = MBB->insert(MBB->erase(use), NewMI);
+ VNUseCount[CurrVN].erase(use);
+
+ // Remove deleted instructions. Note that we need to remove them from
+ // the VNInfo->use map as well, just to be safe.
+ for (SmallPtrSet<MachineInstr*, 4>::iterator II =
+ VNUseCount[CurrVN].begin(), IE = VNUseCount[CurrVN].end();
+ II != IE; ++II) {
+ for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
+ VNI = VNUseCount.begin(), VNE = VNUseCount.end(); VNI != VNE;
+ ++VNI)
+ if (VNI->first != CurrVN)
+ VNI->second.erase(*II);
+ LIs->RemoveMachineInstrFromMaps(*II);
+ (*II)->eraseFromParent();
+ }
+
+ VNUseCount.erase(CurrVN);
+
+ // Any other VNInfo whose use set referenced the folded instruction now
+ // refers to the replacement instruction instead.
+ for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
+ VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
+ if (VI->second.erase(use))
+ VI->second.insert(NewMI);
+
+ NumDeadSpills++;
+ changed = true;
+ continue;
+ }
+
+ // If there's more than one non-store instruction, we can't profitably
+ // fold it, so bail.
+ if (NonSpillCount) continue;
+
+ // Otherwise, this is a load-store case, so DCE them.
+ // FIX: the loop condition previously read "UI != UI", which is always
+ // false -- the dead stores were never erased (nor removed from the
+ // slot-index maps), even though NumDeadSpills/changed were still updated.
+ for (SmallPtrSet<MachineInstr*, 4>::iterator UI =
+ VNUseCount[CurrVN].begin(), UE = VNUseCount[CurrVN].end();
+ UI != UE; ++UI) {
+ LIs->RemoveMachineInstrFromMaps(*UI);
+ (*UI)->eraseFromParent();
+ }
+
+ VNUseCount.erase(CurrVN);
+
+ LIs->RemoveMachineInstrFromMaps(DefMI);
+ (*LI)->removeValNo(CurrVN);
+ DefMI->eraseFromParent();
+ NumDeadSpills++;
+ changed = true;
+ }
+ }
+
+ return changed;
+}
+
+/// createsNewJoin - Determine whether the live range LR (defined in DefMBB,
+/// with a barrier in BarrierMBB) can flow around the CFG and re-enter blocks
+/// past the barrier along another path, which would create a new join point.
+/// NOTE(review): purpose inferred from the name and the CFG/domtree walk
+/// below -- confirm against the splitter's call site.
+bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
+ MachineBasicBlock* DefMBB,
+ MachineBasicBlock* BarrierMBB) {
+ // A barrier inside the defining block can't introduce a join.
+ if (DefMBB == BarrierMBB)
+ return false;
+
+ // Values with PHI kills already participate in joins; don't analyze them.
+ if (LR->valno->hasPHIKill())
+ return false;
+
+ // If the range ends before the end of the barrier block, it can't reach
+ // any successor, so no new join is possible.
+ unsigned MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
+ if (LR->end < MBBEnd)
+ return false;
+
+ // Conservatively report a join when the def and the barrier live in
+ // different loops.
+ MachineLoopInfo& MLI = getAnalysis<MachineLoopInfo>();
+ if (MLI.getLoopFor(DefMBB) != MLI.getLoopFor(BarrierMBB))
+ return true;
+
+ // Iterative DFS from BarrierMBB's successors. Each stack entry pairs a
+ // block with the next successor of that block still to be visited.
+ MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
+ SmallPtrSet<MachineBasicBlock*, 4> Visited;
+ typedef std::pair<MachineBasicBlock*,
+ MachineBasicBlock::succ_iterator> ItPair;
+ SmallVector<ItPair, 4> Stack;
+ Stack.push_back(std::make_pair(BarrierMBB, BarrierMBB->succ_begin()));
+
+ while (!Stack.empty()) {
+ ItPair P = Stack.back();
+ Stack.pop_back();
+
+ MachineBasicBlock* PredMBB = P.first;
+ MachineBasicBlock::succ_iterator S = P.second;
+
+ if (S == PredMBB->succ_end())
+ continue;
+ else if (Visited.count(*S)) {
+ // Successor already seen: advance this entry to the next successor.
+ Stack.push_back(std::make_pair(PredMBB, ++S));
+ continue;
+ } else
+ // Re-push the parent pointing at its next successor before descending
+ // into *S, so the remaining siblings are processed later.
+ Stack.push_back(std::make_pair(PredMBB, S+1));
+
+ MachineBasicBlock* MBB = *S;
+ Visited.insert(MBB);
+
+ // The walk looped back to the barrier block itself: a cycle exists.
+ if (MBB == BarrierMBB)
+ return true;
+
+ // Walk up the idom chain from MBB. Reaching DefMBB before BarrierMBB
+ // means MBB is dominated by the def without the barrier intervening.
+ MachineDomTreeNode* DefMDTN = MDT.getNode(DefMBB);
+ MachineDomTreeNode* BarrierMDTN = MDT.getNode(BarrierMBB);
+ MachineDomTreeNode* MDTN = MDT.getNode(MBB)->getIDom();
+ while (MDTN) {
+ if (MDTN == DefMDTN)
+ return true;
+ else if (MDTN == BarrierMDTN)
+ break;
+ MDTN = MDTN->getIDom();
+ }
+
+ // Only continue the DFS through blocks the live range extends beyond.
+ MBBEnd = LIs->getMBBEndIdx(MBB);
+ if (LR->end > MBBEnd)
+ Stack.push_back(std::make_pair(MBB, MBB->succ_begin()));
+ }
+
+ return false;
+}
+
+