//===-- PreAllocSplitting.cpp - Pre-allocation Interval Splitting Pass. --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the machine instruction level pre-register allocation
// live interval splitting pass. It finds live interval barriers, i.e.
// instructions which will kill all physical registers in certain register
// classes, and splits all live intervals which cross the barrier.
//
//===----------------------------------------------------------------------===//
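//
// For example (an illustrative sketch, not tied to any particular target):
// given a barrier that clobbers the register class of %v0, the pass rewrites
//
//     %v0 = ...                           %v0 = ...
//     ...                                 store %v0 -> [ss#0]   // spill
//     BARRIER                  ==>        BARRIER
//     ...                                 %v1 = load [ss#0]     // restore
//     ... = use %v0                       ... = use %v1
//
// and renumbers the restored value into a fresh vreg so the register
// allocator may assign it a different physical register.
//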
#define DEBUG_TYPE "pre-alloc-split"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
static cl::opt<int> PreSplitLimit("pre-split-limit", cl::init(-1), cl::Hidden);
static cl::opt<int> DeadSplitLimit("dead-split-limit", cl::init(-1), cl::Hidden);

STATISTIC(NumSplits, "Number of intervals split");
STATISTIC(NumRemats, "Number of intervals split by rematerialization");
STATISTIC(NumFolds, "Number of intervals split with spill folding");
STATISTIC(NumRenumbers, "Number of intervals renumbered into new registers");
STATISTIC(NumDeadSpills, "Number of dead spills removed");
namespace {
  class VISIBILITY_HIDDEN PreAllocSplitting : public MachineFunctionPass {
    MachineFunction       *CurrMF;
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    MachineFrameInfo      *MFI;
    MachineRegisterInfo   *MRI;
    LiveIntervals         *LIs;
    LiveStacks            *LSs;

    // Barrier - Current barrier being processed.
    MachineInstr          *Barrier;

    // BarrierMBB - Basic block where the barrier resides.
    MachineBasicBlock     *BarrierMBB;

    // BarrierIdx - Current barrier index.
    unsigned              BarrierIdx;

    // CurrLI - Current live interval being split.
    LiveInterval          *CurrLI;

    // CurrSLI - Current stack slot live interval.
    LiveInterval          *CurrSLI;

    // CurrSValNo - Current val# for the stack slot live interval.
    VNInfo                *CurrSValNo;

    // IntervalSSMap - A map from live interval to spill slots.
    DenseMap<unsigned, int> IntervalSSMap;

    // Def2SpillMap - A map from a def instruction index to spill index.
    DenseMap<unsigned, unsigned> Def2SpillMap;

  public:
    static char ID;
    PreAllocSplitting() : MachineFunctionPass(&ID) {}
    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.addRequired<LiveIntervals>();
      AU.addPreserved<LiveIntervals>();
      AU.addRequired<LiveStacks>();
      AU.addPreserved<LiveStacks>();
      AU.addPreserved<RegisterCoalescer>();
      AU.addPreservedID(StrongPHIEliminationID);
      AU.addPreservedID(PHIEliminationID);
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addPreserved<MachineLoopInfo>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }

    virtual void releaseMemory() {
      IntervalSSMap.clear();
      Def2SpillMap.clear();
    }

    virtual const char *getPassName() const {
      return "Pre-Register Allocation Live Interval Splitting";
    }

    /// print - Implement the dump method.
    virtual void print(std::ostream &O, const Module* M = 0) const {
      LIs->print(O, M);
    }

    void print(std::ostream *O, const Module* M = 0) const {
      if (O) print(*O, M);
    }
  private:
    MachineBasicBlock::iterator
      findNextEmptySlot(MachineBasicBlock*, MachineInstr*,
                        unsigned&);

    MachineBasicBlock::iterator
      findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
                     SmallPtrSet<MachineInstr*, 4>&, unsigned&);

    MachineBasicBlock::iterator
      findRestorePoint(MachineBasicBlock*, MachineInstr*, unsigned,
                       SmallPtrSet<MachineInstr*, 4>&, unsigned&);

    int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);

    bool IsAvailableInStack(MachineBasicBlock*, unsigned, unsigned, unsigned,
                            unsigned&, int&) const;

    void UpdateSpillSlotInterval(VNInfo*, unsigned, unsigned);

    bool SplitRegLiveInterval(LiveInterval*);

    bool SplitRegLiveIntervals(const TargetRegisterClass **,
                               SmallPtrSet<LiveInterval*, 8>&);

    bool createsNewJoin(LiveRange* LR, MachineBasicBlock* DefMBB,
                        MachineBasicBlock* BarrierMBB);
    bool Rematerialize(unsigned vreg, VNInfo* ValNo,
                       MachineInstr* DefMI,
                       MachineBasicBlock::iterator RestorePt,
                       unsigned RestoreIdx,
                       SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
                            MachineInstr* DefMI,
                            MachineInstr* Barrier,
                            MachineBasicBlock* MBB,
                            int& SS,
                            SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    void RenumberValno(VNInfo* VN);
    void ReconstructLiveInterval(LiveInterval* LI);
    bool removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split);
    unsigned getNumberOfNonSpills(SmallPtrSet<MachineInstr*, 4>& MIs,
                                  unsigned Reg, int FrameIndex, bool& TwoAddr);
    VNInfo* PerformPHIConstruction(MachineBasicBlock::iterator use,
                                   MachineBasicBlock* MBB,
                                   LiveInterval* LI,
                                   SmallPtrSet<MachineInstr*, 4>& Visited,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                   DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                   bool toplevel, bool intrablock);
  };
} // end anonymous namespace

char PreAllocSplitting::ID = 0;

static RegisterPass<PreAllocSplitting>
X("pre-alloc-splitting", "Pre-Register Allocation Live Interval Splitting");

const PassInfo *const llvm::PreAllocSplittingID = &X;
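
// Note on "gaps": LiveIntervals numbers instructions sparsely, leaving unused
// index values between consecutive instructions. findGapBeforeInstr returns
// such an unused index, which is where a newly inserted spill or restore can
// be mapped without renumbering the rest of the function. (A sketch of the
// idea; see LiveIntervalAnalysis for the exact numbering scheme.)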
/// findNextEmptySlot - Find a gap after the given machine instruction in the
/// instruction index map. If there isn't one, return end().
MachineBasicBlock::iterator
PreAllocSplitting::findNextEmptySlot(MachineBasicBlock *MBB, MachineInstr *MI,
                                     unsigned &SpotIndex) {
  MachineBasicBlock::iterator MII = MI;
  if (++MII != MBB->end()) {
    unsigned Index = LIs->findGapBeforeInstr(LIs->getInstructionIndex(MII));
    if (Index) {
      SpotIndex = Index;
      return MII;
    }
  }
  return MBB->end();
}
/// findSpillPoint - Find a gap as far away from the given MI as possible
/// that's suitable for spilling the current live interval. The index must be
/// before any defs and uses of the live interval register in the mbb. Return
/// begin() if none is found.
MachineBasicBlock::iterator
PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
                                  MachineInstr *DefMI,
                                  SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
                                  unsigned &SpillIndex) {
  MachineBasicBlock::iterator Pt = MBB->begin();

  // Go top down if RefsInMBB is empty.
  if (RefsInMBB.empty() && !DefMI) {
    MachineBasicBlock::iterator MII = MBB->begin();
    MachineBasicBlock::iterator EndPt = MI;
    do {
      ++MII;
      unsigned Index = LIs->getInstructionIndex(MII);
      unsigned Gap = LIs->findGapBeforeInstr(Index);
      if (Gap) {
        Pt = MII;
        SpillIndex = Gap;
        break;
      }
    } while (MII != EndPt);
  } else {
    MachineBasicBlock::iterator MII = MI;
    MachineBasicBlock::iterator EndPt = DefMI
      ? MachineBasicBlock::iterator(DefMI) : MBB->begin();
    while (MII != EndPt && !RefsInMBB.count(MII)) {
      unsigned Index = LIs->getInstructionIndex(MII);
      if (LIs->hasGapBeforeInstr(Index)) {
        Pt = MII;
        SpillIndex = LIs->findGapBeforeInstr(Index, true);
      }
      --MII;
    }
  }

  return Pt;
}
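
// Note: the backward walk above keeps updating Pt as it sees gaps, so the
// chosen spill point ends up as close to the def (and as far from the
// barrier) as possible, while never moving above the def or another
// reference in the block, where the store would read a not-yet-computed or
// stale value.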
/// findRestorePoint - Find a gap in the instruction index map that's suitable
/// for restoring the current live interval value. The index must be before any
/// uses of the live interval register in the mbb. Return end() if none is
/// found.
MachineBasicBlock::iterator
PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
                                    unsigned LastIdx,
                                    SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
                                    unsigned &RestoreIndex) {
  // FIXME: Allow spill to be inserted to the beginning of the mbb. Update mbb
  // begin index accordingly.
  MachineBasicBlock::iterator Pt = MBB->end();
  unsigned EndIdx = LIs->getMBBEndIdx(MBB);

  // Go bottom up if RefsInMBB is empty and the end of the mbb isn't beyond
  // the last index in the live range.
  if (RefsInMBB.empty() && LastIdx >= EndIdx) {
    MachineBasicBlock::iterator MII = MBB->getFirstTerminator();
    MachineBasicBlock::iterator EndPt = MI;
    do {
      --MII;
      unsigned Index = LIs->getInstructionIndex(MII);
      unsigned Gap = LIs->findGapBeforeInstr(Index);
      if (Gap) {
        Pt = MII;
        RestoreIndex = Gap;
        break;
      }
    } while (MII != EndPt);
  } else {
    MachineBasicBlock::iterator MII = MI;
    ++MII;
    // FIXME: Limit the number of instructions to examine to reduce
    // compile time.
    while (MII != MBB->end()) {
      unsigned Index = LIs->getInstructionIndex(MII);
      if (Index > LastIdx)
        break;
      unsigned Gap = LIs->findGapBeforeInstr(Index);
      if (Gap) {
        Pt = MII;
        RestoreIndex = Gap;
      }
      if (RefsInMBB.count(MII))
        break;
      ++MII;
    }
  }

  return Pt;
}
/// CreateSpillStackSlot - Create a stack slot for the live interval being
/// split. If the live interval was previously split, just reuse the same
/// slot.
int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
                                            const TargetRegisterClass *RC) {
  int SS;
  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
  if (I != IntervalSSMap.end()) {
    SS = I->second;
  } else {
    SS = MFI->CreateStackObject(RC->getSize(), RC->getAlignment());
    IntervalSSMap[Reg] = SS;
  }

  // Create live interval for stack slot.
  CurrSLI = &LSs->getOrCreateInterval(SS);
  if (CurrSLI->hasAtLeastOneValue())
    CurrSValNo = CurrSLI->getValNumInfo(0);
  else
    CurrSValNo = CurrSLI->getNextValue(~0U, 0, LSs->getVNInfoAllocator());
  return SS;
}
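
// Reusing one stack slot per original vreg (via IntervalSSMap) keeps stack
// usage down, and together with Def2SpillMap it lets IsAvailableInStack
// notice that a value spilled by an earlier split is still in memory, so a
// later split of the same value only needs a restore.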
/// IsAvailableInStack - Return true if register is available in a split stack
/// slot at the specified index.
bool
PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
                                      unsigned Reg, unsigned DefIndex,
                                      unsigned RestoreIndex,
                                      unsigned &SpillIndex, int &SS) const {
  if (!DefMBB)
    return false;

  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
  if (I == IntervalSSMap.end())
    return false;
  DenseMap<unsigned, unsigned>::iterator II = Def2SpillMap.find(DefIndex);
  if (II == Def2SpillMap.end())
    return false;

  // If last spill of def is in the same mbb as barrier mbb (where restore will
  // be), make sure it's not below the intended restore index.
  // FIXME: Undo the previous spill?
  assert(LIs->getMBBFromIndex(II->second) == DefMBB);
  if (DefMBB == BarrierMBB && II->second >= RestoreIndex)
    return false;

  SS = I->second;
  SpillIndex = II->second;
  return true;
}
/// UpdateSpillSlotInterval - Given the specified val# of the register live
/// interval being split, and the spill and restore indices, update the live
/// interval of the spill stack slot.
void
PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, unsigned SpillIndex,
                                           unsigned RestoreIndex) {
  assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
         "Expect restore in the barrier mbb");

  MachineBasicBlock *MBB = LIs->getMBBFromIndex(SpillIndex);
  if (MBB == BarrierMBB) {
    // Intra-block spill + restore. We are done.
    LiveRange SLR(SpillIndex, RestoreIndex, CurrSValNo);
    CurrSLI->addRange(SLR);
    return;
  }

  SmallPtrSet<MachineBasicBlock*, 4> Processed;
  unsigned EndIdx = LIs->getMBBEndIdx(MBB);
  LiveRange SLR(SpillIndex, EndIdx+1, CurrSValNo);
  CurrSLI->addRange(SLR);
  Processed.insert(MBB);

  // Start from the spill mbb, figure out the extent of the spill slot's
  // live interval.
  SmallVector<MachineBasicBlock*, 4> WorkList;
  const LiveRange *LR = CurrLI->getLiveRangeContaining(SpillIndex);
  if (LR->end > EndIdx)
    // If live range extends beyond end of mbb, add successors to work list.
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      WorkList.push_back(*SI);

  while (!WorkList.empty()) {
    MachineBasicBlock *MBB = WorkList.back();
    WorkList.pop_back();
    if (Processed.count(MBB))
      continue;
    unsigned Idx = LIs->getMBBStartIdx(MBB);
    LR = CurrLI->getLiveRangeContaining(Idx);
    if (LR && LR->valno == ValNo) {
      EndIdx = LIs->getMBBEndIdx(MBB);
      if (Idx <= RestoreIndex && RestoreIndex < EndIdx) {
        // Spill slot live interval stops at the restore.
        LiveRange SLR(Idx, RestoreIndex, CurrSValNo);
        CurrSLI->addRange(SLR);
      } else if (LR->end > EndIdx) {
        // Live range extends beyond end of mbb, process successors.
        LiveRange SLR(Idx, EndIdx+1, CurrSValNo);
        CurrSLI->addRange(SLR);
        for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
               SE = MBB->succ_end(); SI != SE; ++SI)
          WorkList.push_back(*SI);
      } else {
        LiveRange SLR(Idx, LR->end, CurrSValNo);
        CurrSLI->addRange(SLR);
      }
      Processed.insert(MBB);
    }
  }
}
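
// An illustrative example of the propagation above: with the spill in MBB0
// and the restore in MBB2,
//
//         MBB0 (spill)
//         /        \
//      MBB1        MBB2 (restore)
//
// the slot is live from the spill to the end of MBB0, live across any
// successor (like MBB1) that the register value is live through, and live
// from the start of MBB2 up to the restore index.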
/// PerformPHIConstruction - From properly set up use and def lists, use a PHI
/// construction algorithm to compute the ranges and valnos for an interval.
VNInfo* PreAllocSplitting::PerformPHIConstruction(
                                              MachineBasicBlock::iterator use,
                                              MachineBasicBlock* MBB,
                                              LiveInterval* LI,
                                       SmallPtrSet<MachineInstr*, 4>& Visited,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                       DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                    DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                              bool toplevel, bool intrablock) {
  // Return memoized result if it's available.
  if (toplevel && Visited.count(use) && NewVNs.count(use))
    return NewVNs[use];
  else if (!toplevel && intrablock && NewVNs.count(use))
    return NewVNs[use];
  else if (!intrablock && LiveOut.count(MBB))
    return LiveOut[MBB];

  typedef DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> > RegMap;

  // Check if our block contains any uses or defs.
  bool ContainsDefs = Defs.count(MBB);
  bool ContainsUses = Uses.count(MBB);

  VNInfo* ret = 0;

  // Enumerate the cases of use/def containing blocks.
  if (!ContainsDefs && !ContainsUses) {
    // NOTE: Because this is the fallback case from other cases, we do NOT
    // assume that we are not intrablock here.
    if (Phis.count(MBB)) return Phis[MBB];

    unsigned StartIndex = LIs->getMBBStartIdx(MBB);

    if (MBB->pred_size() == 1) {
      Phis[MBB] = ret = PerformPHIConstruction((*MBB->pred_begin())->end(),
                                               *(MBB->pred_begin()), LI, Visited,
                                               Defs, Uses, NewVNs, LiveOut, Phis,
                                               false, false);
      unsigned EndIndex = 0;
      if (intrablock) {
        EndIndex = LIs->getInstructionIndex(use);
        EndIndex = LiveIntervals::getUseIndex(EndIndex);
      } else
        EndIndex = LIs->getMBBEndIdx(MBB);

      LI->addRange(LiveRange(StartIndex, EndIndex+1, ret));
      if (intrablock)
        LI->addKill(ret, EndIndex);
    } else {
      Phis[MBB] = ret = LI->getNextValue(~0U, /*FIXME*/ 0,
                                         LIs->getVNInfoAllocator());
      if (!intrablock) LiveOut[MBB] = ret;

      // If there are no uses or defs between our starting point and the
      // beginning of the block, then recursively perform phi construction
      // on our predecessors.
      DenseMap<MachineBasicBlock*, VNInfo*> IncomingVNs;
      for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
           PE = MBB->pred_end(); PI != PE; ++PI) {
        VNInfo* Incoming = PerformPHIConstruction((*PI)->end(), *PI, LI,
                                                  Visited, Defs, Uses, NewVNs,
                                                  LiveOut, Phis, false, false);
        if (Incoming != 0)
          IncomingVNs[*PI] = Incoming;
      }

      // Otherwise, merge the incoming VNInfos with a phi join. Create a new
      // VNInfo to represent the joined value.
      for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
           IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
        I->second->hasPHIKill = true;
        unsigned KillIndex = LIs->getMBBEndIdx(I->first);
        LI->addKill(I->second, KillIndex);
      }

      unsigned EndIndex = 0;
      if (intrablock) {
        EndIndex = LIs->getInstructionIndex(use);
        EndIndex = LiveIntervals::getUseIndex(EndIndex);
      } else
        EndIndex = LIs->getMBBEndIdx(MBB);
      LI->addRange(LiveRange(StartIndex, EndIndex+1, ret));
      if (intrablock)
        LI->addKill(ret, EndIndex);
    }
  } else if (ContainsDefs && !ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];

    // Search for the def in this block. If we don't find it before the
    // instruction we care about, go to the fallback case. Note that this
    // should never happen: this cannot be intrablock, so use should
    // always be an end() iterator.
    assert(use == MBB->end() && "No use marked in intrablock");

    MachineBasicBlock::iterator walker = use;
    --walker;
    while (walker != MBB->begin())
      if (BlockDefs.count(walker)) {
        break;
      } else
        --walker;

    // Once we've found it, extend its VNInfo to our instruction.
    unsigned DefIndex = LIs->getInstructionIndex(walker);
    DefIndex = LiveIntervals::getDefIndex(DefIndex);
    unsigned EndIndex = LIs->getMBBEndIdx(MBB);

    ret = NewVNs[walker];
    LI->addRange(LiveRange(DefIndex, EndIndex+1, ret));
  } else if (!ContainsDefs && ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

    // Search for the use in this block that precedes the instruction we care
    // about, going to the fallback case if we don't find it.

    if (use == MBB->begin())
      return PerformPHIConstruction(use, MBB, LI, Visited, Defs, Uses,
                                    NewVNs, LiveOut, Phis, false, false);

    MachineBasicBlock::iterator walker = use;
    --walker;
    bool found = false;
    while (walker != MBB->begin())
      if (BlockUses.count(walker)) {
        found = true;
        break;
      } else
        --walker;

    // Must check begin() too.
    if (!found) {
      if (BlockUses.count(walker))
        found = true;
      else
        return PerformPHIConstruction(use, MBB, LI, Visited, Defs, Uses,
                                      NewVNs, LiveOut, Phis, false, false);
    }

    unsigned UseIndex = LIs->getInstructionIndex(walker);
    UseIndex = LiveIntervals::getUseIndex(UseIndex);
    unsigned EndIndex = 0;
    if (intrablock) {
      EndIndex = LIs->getInstructionIndex(use);
      EndIndex = LiveIntervals::getUseIndex(EndIndex);
    } else
      EndIndex = LIs->getMBBEndIdx(MBB);

    // Now, recursively phi construct the VNInfo for the use we found,
    // and then extend it to include the instruction we care about.
    ret = PerformPHIConstruction(walker, MBB, LI, Visited, Defs, Uses,
                                 NewVNs, LiveOut, Phis, false, true);

    LI->addRange(LiveRange(UseIndex, EndIndex+1, ret));

    // FIXME: Need to set kills properly for inter-block stuff.
    if (LI->isKill(ret, UseIndex)) LI->removeKill(ret, UseIndex);
    if (intrablock)
      LI->addKill(ret, EndIndex);
  } else if (ContainsDefs && ContainsUses){
    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

    // This case is basically a merging of the two preceding cases, with the
    // special note that checking for defs must take precedence over checking
    // for uses, because of two-address instructions.

    if (use == MBB->begin())
      return PerformPHIConstruction(use, MBB, LI, Visited, Defs, Uses,
                                    NewVNs, LiveOut, Phis, false, false);

    MachineBasicBlock::iterator walker = use;
    --walker;
    bool foundDef = false;
    bool foundUse = false;
    while (walker != MBB->begin())
      if (BlockDefs.count(walker)) {
        foundDef = true;
        break;
      } else if (BlockUses.count(walker)) {
        foundUse = true;
        break;
      } else
        --walker;

    // Must check begin() too.
    if (!foundDef && !foundUse) {
      if (BlockDefs.count(walker))
        foundDef = true;
      else if (BlockUses.count(walker))
        foundUse = true;
      else
        return PerformPHIConstruction(use, MBB, LI, Visited, Defs, Uses,
                                      NewVNs, LiveOut, Phis, false, false);
    }

    unsigned StartIndex = LIs->getInstructionIndex(walker);
    StartIndex = foundDef ? LiveIntervals::getDefIndex(StartIndex) :
                            LiveIntervals::getUseIndex(StartIndex);
    unsigned EndIndex = 0;
    if (intrablock) {
      EndIndex = LIs->getInstructionIndex(use);
      EndIndex = LiveIntervals::getUseIndex(EndIndex);
    } else
      EndIndex = LIs->getMBBEndIdx(MBB);

    if (foundDef)
      ret = NewVNs[walker];
    else
      ret = PerformPHIConstruction(walker, MBB, LI, Visited, Defs, Uses,
                                   NewVNs, LiveOut, Phis, false, true);

    LI->addRange(LiveRange(StartIndex, EndIndex+1, ret));

    if (foundUse && LI->isKill(ret, StartIndex))
      LI->removeKill(ret, StartIndex);
    if (intrablock)
      LI->addKill(ret, EndIndex);
  }

  // Memoize results so we don't have to recompute them.
  if (!intrablock) LiveOut[MBB] = ret;
  else {
    if (!NewVNs.count(use))
      NewVNs[use] = ret;
    Visited.insert(use);
  }

  return ret;
}
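
// A worked example (illustrative): for a use in the join block of a diamond
// CFG whose only def is in the entry block, the walk starts in the "uses
// only" case, finds no earlier use in the block, and falls back to the
// "no defs, no uses" case. Seeing two predecessors, that case creates a
// fresh phi-join VNInfo with getNextValue, recursively resolves each
// predecessor's live-out value, marks those values hasPHIKill, and extends
// the ranges to the appropriate block ends.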
/// ReconstructLiveInterval - Recompute a live interval from scratch.
void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
  BumpPtrAllocator& Alloc = LIs->getVNInfoAllocator();

  // Clear the old ranges and valnos.
  LI->clear();

  // Cache the uses and defs of the register.
  typedef DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> > RegMap;
  RegMap Defs, Uses;

  // Keep track of the new VNs we're creating.
  DenseMap<MachineInstr*, VNInfo*> NewVNs;
  SmallPtrSet<VNInfo*, 2> PhiVNs;

  // Cache defs, and create a new VNInfo for each def.
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
       DE = MRI->def_end(); DI != DE; ++DI) {
    Defs[(*DI).getParent()].insert(&*DI);

    unsigned DefIdx = LIs->getInstructionIndex(&*DI);
    DefIdx = LiveIntervals::getDefIndex(DefIdx);

    VNInfo* NewVN = LI->getNextValue(DefIdx, 0, Alloc);

    // If the def is a move, set the copy field.
    unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
    if (TII->isMoveInstr(*DI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
      if (DstReg == LI->reg)
        NewVN->copy = &*DI;

    NewVNs[&*DI] = NewVN;
  }

  // Cache uses as a separate pass from actually processing them.
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
       UE = MRI->use_end(); UI != UE; ++UI)
    Uses[(*UI).getParent()].insert(&*UI);

  // Now, actually process every use and use a phi construction algorithm
  // to walk from it to its reaching definitions, building VNInfos along
  // the way.
  DenseMap<MachineBasicBlock*, VNInfo*> LiveOut;
  DenseMap<MachineBasicBlock*, VNInfo*> Phis;
  SmallPtrSet<MachineInstr*, 4> Visited;
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
       UE = MRI->use_end(); UI != UE; ++UI) {
    PerformPHIConstruction(&*UI, UI->getParent(), LI, Visited, Defs,
                           Uses, NewVNs, LiveOut, Phis, true, true);
  }

  // Add ranges for dead defs.
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
       DE = MRI->def_end(); DI != DE; ++DI) {
    unsigned DefIdx = LIs->getInstructionIndex(&*DI);
    DefIdx = LiveIntervals::getDefIndex(DefIdx);

    if (LI->liveAt(DefIdx)) continue;

    VNInfo* DeadVN = NewVNs[&*DI];
    LI->addRange(LiveRange(DefIdx, DefIdx+1, DeadVN));
    LI->addKill(DeadVN, DefIdx);
  }
}
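
// Note: the def and use caches above are built from MachineRegisterInfo, so
// every instruction touching the register must already be in the instruction
// index maps. Both callers insert their new spill, restore, or remat
// instructions into the maps before invoking ReconstructLiveInterval.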
/// RenumberValno - Split the given valno out into a new vreg, allowing it to
/// be allocated to a different register. This function creates a new vreg,
/// copies the valno and its live ranges over to the new vreg's interval,
/// removes them from the old interval, and rewrites all uses and defs of
/// the original reg to the new vreg within those ranges.
void PreAllocSplitting::RenumberValno(VNInfo* VN) {
  SmallVector<VNInfo*, 4> Stack;
  SmallVector<VNInfo*, 4> VNsToCopy;
  Stack.push_back(VN);

  // Walk through and copy the valno we care about, and any other valnos
  // that are two-address redefinitions of the one we care about. These
  // will need to be rewritten as well. We also check for safety of the
  // renumbering here, by making sure that none of the valnos involved have
  // phi kills.
  while (!Stack.empty()) {
    VNInfo* OldVN = Stack.back();
    Stack.pop_back();

    // Bail out if we ever encounter a valno that has a PHI kill. We can't
    // renumber these.
    if (OldVN->hasPHIKill) return;

    VNsToCopy.push_back(OldVN);

    // Locate two-address redefinitions.
    for (SmallVector<unsigned, 4>::iterator KI = OldVN->kills.begin(),
         KE = OldVN->kills.end(); KI != KE; ++KI) {
      MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
      unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
      if (DefIdx == ~0U) continue;
      if (MI->isRegReDefinedByTwoAddr(DefIdx)) {
        VNInfo* NextVN =
          CurrLI->findDefinedVNInfo(LiveIntervals::getDefIndex(*KI));
        if (NextVN == OldVN) continue;
        Stack.push_back(NextVN);
      }
    }
  }

  // Create the new vreg.
  unsigned NewVReg = MRI->createVirtualRegister(MRI->getRegClass(CurrLI->reg));

  // Create the new live interval.
  LiveInterval& NewLI = LIs->getOrCreateInterval(NewVReg);

  for (SmallVector<VNInfo*, 4>::iterator OI = VNsToCopy.begin(), OE =
       VNsToCopy.end(); OI != OE; ++OI) {
    VNInfo* OldVN = *OI;

    // Copy the valno over.
    VNInfo* NewVN = NewLI.getNextValue(OldVN->def, OldVN->copy,
                                       LIs->getVNInfoAllocator());
    NewLI.copyValNumInfo(NewVN, OldVN);
    NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);

    // Remove the valno from the old interval.
    CurrLI->removeValNo(OldVN);
  }

  // Rewrite defs and uses. This is done in two stages to avoid invalidating
  // the reg_iterator.
  SmallVector<std::pair<MachineInstr*, unsigned>, 8> OpsToChange;

  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
       E = MRI->reg_end(); I != E; ++I) {
    MachineOperand& MO = I.getOperand();
    unsigned InstrIdx = LIs->getInstructionIndex(&*I);

    if ((MO.isUse() && NewLI.liveAt(LiveIntervals::getUseIndex(InstrIdx))) ||
        (MO.isDef() && NewLI.liveAt(LiveIntervals::getDefIndex(InstrIdx))))
      OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
  }

  for (SmallVector<std::pair<MachineInstr*, unsigned>, 8>::iterator I =
       OpsToChange.begin(), E = OpsToChange.end(); I != E; ++I) {
    MachineInstr* Inst = I->first;
    unsigned OpIdx = I->second;
    MachineOperand& MO = Inst->getOperand(OpIdx);
    MO.setReg(NewVReg);
  }

  // The renumbered vreg shares a stack slot with the old register.
  if (IntervalSSMap.count(CurrLI->reg))
    IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];

  NumRenumbers++;
}
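
// Usage note: callers hand RenumberValno the VNInfo of a freshly inserted
// restore or rematerialized def, so only the ranges reached from that new
// definition (plus any two-address redefinitions chained onto it) migrate to
// the new vreg; values tied together through a PHI are left untouched.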
bool PreAllocSplitting::Rematerialize(unsigned vreg, VNInfo* ValNo,
                                      MachineInstr* DefMI,
                                      MachineBasicBlock::iterator RestorePt,
                                      unsigned RestoreIdx,
                                      SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  MachineBasicBlock& MBB = *RestorePt->getParent();

  MachineBasicBlock::iterator KillPt = BarrierMBB->end();
  unsigned KillIdx = 0;
  if (ValNo->def == ~0U || DefMI->getParent() == BarrierMBB)
    KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx);
  else
    KillPt = findNextEmptySlot(DefMI->getParent(), DefMI, KillIdx);

  if (KillPt == DefMI->getParent()->end())
    return false;

  TII->reMaterialize(MBB, RestorePt, vreg, DefMI);
  LIs->InsertMachineInstrInMaps(prior(RestorePt), RestoreIdx);

  ReconstructLiveInterval(CurrLI);
  unsigned RematIdx = LIs->getInstructionIndex(prior(RestorePt));
  RematIdx = LiveIntervals::getDefIndex(RematIdx);
  RenumberValno(CurrLI->findDefinedVNInfo(RematIdx));

  ++NumSplits;
  ++NumRemats;
  return true;
}
MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
                                           const TargetRegisterClass* RC,
                                           MachineInstr* DefMI,
                                           MachineInstr* Barrier,
                                           MachineBasicBlock* MBB,
                                           int& SS,
                                           SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  MachineBasicBlock::iterator Pt = MBB->begin();

  // Go top down if RefsInMBB is empty.
  if (RefsInMBB.empty())
    return 0;

  MachineBasicBlock::iterator FoldPt = Barrier;
  while (&*FoldPt != DefMI && FoldPt != MBB->begin() &&
         !RefsInMBB.count(FoldPt))
    --FoldPt;

  int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg, false);
  if (OpIdx == -1)
    return 0;

  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpIdx);

  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
    return 0;

  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(vreg);
  if (I != IntervalSSMap.end()) {
    SS = I->second;
  } else {
    SS = MFI->CreateStackObject(RC->getSize(), RC->getAlignment());
  }

  MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
                                             FoldPt, Ops, SS);

  if (FMI) {
    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
    FMI = MBB->insert(MBB->erase(FoldPt), FMI);
    ++NumFolds;

    IntervalSSMap[vreg] = SS;
    CurrSLI = &LSs->getOrCreateInterval(SS);
    if (CurrSLI->hasAtLeastOneValue())
      CurrSValNo = CurrSLI->getValNumInfo(0);
    else
      CurrSValNo = CurrSLI->getNextValue(~0U, 0, LSs->getVNInfoAllocator());
  }

  return FMI;
}
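
// Roughly, the fold above turns a "def %vreg; store %vreg -> [ss]" pair into
// a single instruction that writes straight to the stack slot, when
// canFoldMemoryOperand says the target supports it, removing an instruction
// and avoiding keeping the value live in a register up to the barrier.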
/// SplitRegLiveInterval - Split (spill and restore) the given live interval
/// so that it does not cross the barrier that's being processed. Shrink wrap
/// (minimize) the live interval to the last uses.
bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
  CurrLI = LI;

  // Find live range where current interval crosses the barrier.
  LiveInterval::iterator LR =
    CurrLI->FindLiveRangeContaining(LIs->getUseIndex(BarrierIdx));
  VNInfo *ValNo = LR->valno;

  if (ValNo->def == ~1U) {
    // Defined by a dead def? How can this be?
    assert(0 && "Val# is defined by a dead def?");
    abort();
  }

  MachineInstr *DefMI = (ValNo->def != ~0U)
    ? LIs->getInstructionFromIndex(ValNo->def) : NULL;

  // If this would create a new join point, do not split.
  if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent()))
    return false;

  // Find all references in the barrier mbb.
  SmallPtrSet<MachineInstr*, 4> RefsInMBB;
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
       E = MRI->reg_end(); I != E; ++I) {
    MachineInstr *RefMI = &*I;
    if (RefMI->getParent() == BarrierMBB)
      RefsInMBB.insert(RefMI);
  }

  // Find a point to restore the value after the barrier.
  unsigned RestoreIndex = 0;
  MachineBasicBlock::iterator RestorePt =
    findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB, RestoreIndex);
  if (RestorePt == BarrierMBB->end())
    return false;

  if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
    if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt,
                      RestoreIndex, RefsInMBB))
      return true;

  // Add a spill either before the barrier or after the definition.
  MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
  const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
  unsigned SpillIndex = 0;
  MachineInstr *SpillMI = NULL;
  int SS = -1;
  if (ValNo->def == ~0U) {
    // If it's defined by a phi, we must split just before the barrier.
    if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
      SpillIndex = LIs->getInstructionIndex(SpillMI);
    } else {
      MachineBasicBlock::iterator SpillPt =
        findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, SpillIndex);
      if (SpillPt == BarrierMBB->begin())
        return false; // No gap to insert spill.
      // Add spill.

      SS = CreateSpillStackSlot(CurrLI->reg, RC);
      TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC);
      SpillMI = prior(SpillPt);
      LIs->InsertMachineInstrInMaps(SpillMI, SpillIndex);
    }
  } else if (!IsAvailableInStack(DefMBB, CurrLI->reg, ValNo->def,
                                 RestoreIndex, SpillIndex, SS)) {
    // If it's already split, just restore the value. There is no need to spill
    // the def again.
    if (!DefMI)
      return false; // Def is dead. Do nothing.

    if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
      SpillIndex = LIs->getInstructionIndex(SpillMI);
    } else {
      // Check if it's possible to insert a spill after the def MI.
      MachineBasicBlock::iterator SpillPt;
      if (DefMBB == BarrierMBB) {
        // Add spill after the def and the last use before the barrier.
        SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
                                 RefsInMBB, SpillIndex);
        if (SpillPt == DefMBB->begin())
          return false; // No gap to insert spill.
      } else {
        SpillPt = findNextEmptySlot(DefMBB, DefMI, SpillIndex);
        if (SpillPt == DefMBB->end())
          return false; // No gap to insert spill.
      }
      // Add spill. The store instruction kills the register if def is before
      // the barrier in the barrier block.
      SS = CreateSpillStackSlot(CurrLI->reg, RC);
      TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg,
                               DefMBB == BarrierMBB, SS, RC);
      SpillMI = prior(SpillPt);
      LIs->InsertMachineInstrInMaps(SpillMI, SpillIndex);
    }
  }

  // Remember def instruction index to spill index mapping.
  if (DefMI && SpillMI)
    Def2SpillMap[ValNo->def] = SpillIndex;

  // Add restore.
  TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC);
  MachineInstr *LoadMI = prior(RestorePt);
  LIs->InsertMachineInstrInMaps(LoadMI, RestoreIndex);

  // Update spill stack slot live interval.
  UpdateSpillSlotInterval(ValNo, LIs->getUseIndex(SpillIndex)+1,
                          LIs->getDefIndex(RestoreIndex));

  ReconstructLiveInterval(CurrLI);
  unsigned RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
  RestoreIdx = LiveIntervals::getDefIndex(RestoreIdx);
  RenumberValno(CurrLI->findDefinedVNInfo(RestoreIdx));

  ++NumSplits;
  return true;
}
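
// Spill placement summary for the code above (illustrative):
//   def is a phi join (ValNo->def == ~0U)  -> spill just before the barrier
//   value already available in its slot    -> no new spill, restore only
//   def in the barrier block               -> spill between def and barrier
//   def in some other block                -> spill in the first gap after
//                                             the def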
/// SplitRegLiveIntervals - Split all register live intervals that cross the
/// barrier that's being processed.
bool
PreAllocSplitting::SplitRegLiveIntervals(const TargetRegisterClass **RCs,
                                         SmallPtrSet<LiveInterval*, 8>& Split) {
  // First find all the virtual registers whose live intervals are intercepted
  // by the current barrier.
  SmallVector<LiveInterval*, 8> Intervals;
  for (const TargetRegisterClass **RC = RCs; *RC; ++RC) {
    if (TII->IgnoreRegisterClassBarriers(*RC))
      continue;
    std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
    for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
      unsigned Reg = VRs[i];
      if (!LIs->hasInterval(Reg))
        continue;
      LiveInterval *LI = &LIs->getInterval(Reg);
      if (LI->liveAt(BarrierIdx) && !Barrier->readsRegister(Reg))
        // Virtual register live interval is intercepted by the barrier. We
        // should split and shrink wrap its interval if possible.
        Intervals.push_back(LI);
    }
  }

  // Process the affected live intervals.
  bool Change = false;
  while (!Intervals.empty()) {
    if (PreSplitLimit != -1 && (int)NumSplits == PreSplitLimit)
      break;
    else if (NumSplits == 4)
      Change |= Change;  // No-op; a convenient place to set a breakpoint.
    LiveInterval *LI = Intervals.back();
    Intervals.pop_back();
    bool result = SplitRegLiveInterval(LI);
    if (result) Split.insert(LI);
    Change |= result;
  }

  return Change;
}
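
// Both PreSplitLimit and DeadSplitLimit (used by removeDeadSpills) default
// to -1, meaning unlimited; setting them from the command line caps the
// number of transformations performed, which is handy for bisecting a
// miscompile down to a single offending split.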
unsigned PreAllocSplitting::getNumberOfNonSpills(
                                          SmallPtrSet<MachineInstr*, 4>& MIs,
                                          unsigned Reg, int FrameIndex,
                                          bool& FeedsTwoAddr) {
  unsigned NonSpills = 0;
  for (SmallPtrSet<MachineInstr*, 4>::iterator UI = MIs.begin(), UE = MIs.end();
       UI != UE; ++UI) {
    int StoreFrameIndex;
    unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
    if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
      NonSpills++;

    int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
    if (DefIdx != -1 && (*UI)->isRegReDefinedByTwoAddr(DefIdx))
      FeedsTwoAddr = true;
  }

  return NonSpills;
}
/// removeDeadSpills - After splitting is complete, filter through all
/// intervals we've split, and see if any of the spills are unnecessary. If
/// so, remove them.
bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
  bool changed = false;

  // Walk over all of the live intervals that were touched by the splitter,
  // and see if we can do any DCE and/or folding.
  for (SmallPtrSet<LiveInterval*, 8>::iterator LI = split.begin(),
       LE = split.end(); LI != LE; ++LI) {
    DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> > VNUseCount;

    // First, collect all the uses of the vreg, and sort them by their
    // reaching definition (VNInfo).
    for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
         UE = MRI->use_end(); UI != UE; ++UI) {
      unsigned index = LIs->getInstructionIndex(&*UI);
      index = LiveIntervals::getUseIndex(index);

      const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
      VNUseCount[LR->valno].insert(&*UI);
    }

    // Now, take the definitions (VNInfo's) one at a time and try to DCE
    // and/or fold them away.
    for (LiveInterval::vni_iterator VI = (*LI)->vni_begin(),
         VE = (*LI)->vni_end(); VI != VE; ++VI) {

      if (DeadSplitLimit != -1 && (int)NumDeadSpills == DeadSplitLimit)
        return changed;

      VNInfo* CurrVN = *VI;

      // We don't currently try to handle definitions with PHI kills, because
      // it would involve processing more than one VNInfo at once.
      if (CurrVN->hasPHIKill) continue;

      // We also don't try to handle the results of PHI joins, since there's
      // no defining instruction to analyze.
      unsigned DefIdx = CurrVN->def;
      if (DefIdx == ~0U || DefIdx == ~1U) continue;

      // We're only interested in eliminating cruft introduced by the splitter,
      // which is of the form load-use or load-use-store. First, check that the
      // definition is a load, and remember what stack slot we loaded it from.
      MachineInstr* DefMI = LIs->getInstructionFromIndex(DefIdx);
      int FrameIndex;
      if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;

      // If the definition has no uses at all, just DCE it.
      if (VNUseCount[CurrVN].size() == 0) {
        LIs->RemoveMachineInstrFromMaps(DefMI);
        (*LI)->removeValNo(CurrVN);
        DefMI->eraseFromParent();
        NumDeadSpills++;
        changed = true;
        continue;
      }

      // Second, get the number of non-store uses of the definition, as well as
      // a flag indicating whether it feeds into a later two-address definition.
      bool FeedsTwoAddr = false;
      unsigned NonSpillCount = getNumberOfNonSpills(VNUseCount[CurrVN],
                                                    (*LI)->reg, FrameIndex,
                                                    FeedsTwoAddr);

      // If there's one non-store use and it doesn't feed a two-addr, then
      // this is a load-use-store case that we can try to fold.
      if (NonSpillCount == 1 && !FeedsTwoAddr) {
        // Start by finding the non-store use MachineInstr.
        SmallPtrSet<MachineInstr*, 4>::iterator UI = VNUseCount[CurrVN].begin();
        int StoreFrameIndex;
        unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
        while (UI != VNUseCount[CurrVN].end() &&
               (StoreVReg == (*LI)->reg && StoreFrameIndex == FrameIndex)) {
          ++UI;
          if (UI != VNUseCount[CurrVN].end())
            StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
        }
        if (UI == VNUseCount[CurrVN].end()) continue;

        MachineInstr* use = *UI;

        // Attempt to fold it away!
        int OpIdx = use->findRegisterUseOperandIdx((*LI)->reg, false);
        if (OpIdx == -1) continue;
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(OpIdx);
        if (!TII->canFoldMemoryOperand(use, Ops)) continue;

        MachineInstr* NewMI =
          TII->foldMemoryOperand(*use->getParent()->getParent(),
                                 use, Ops, FrameIndex);

        if (!NewMI) continue;

        // Update relevant analyses.
        LIs->RemoveMachineInstrFromMaps(DefMI);
        LIs->ReplaceMachineInstrInMaps(use, NewMI);
        (*LI)->removeValNo(CurrVN);

        DefMI->eraseFromParent();
        MachineBasicBlock* MBB = use->getParent();
        NewMI = MBB->insert(MBB->erase(use), NewMI);
        VNUseCount[CurrVN].erase(use);

        // Remove deleted instructions. Note that we need to remove them from
        // the VNInfo->use map as well, just to be safe.
        for (SmallPtrSet<MachineInstr*, 4>::iterator II =
             VNUseCount[CurrVN].begin(), IE = VNUseCount[CurrVN].end();
             II != IE; ++II) {
          for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
               VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
            VI->second.erase(*II);
          LIs->RemoveMachineInstrFromMaps(*II);
          (*II)->eraseFromParent();
        }

        for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
             VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
          if (VI->second.erase(use))
            VI->second.insert(NewMI);

        NumDeadSpills++;
        changed = true;
        continue;
      }

      // If there's more than one non-store instruction, we can't profitably
      // fold it, so bail.
      if (NonSpillCount) continue;

      // Otherwise, this is a load-store case, so DCE them.
      for (SmallPtrSet<MachineInstr*, 4>::iterator UI =
           VNUseCount[CurrVN].begin(), UE = VNUseCount[CurrVN].end();
           UI != UE; ++UI) {
        LIs->RemoveMachineInstrFromMaps(*UI);
        (*UI)->eraseFromParent();
      }

      LIs->RemoveMachineInstrFromMaps(DefMI);
      (*LI)->removeValNo(CurrVN);
      DefMI->eraseFromParent();
      NumDeadSpills++;
      changed = true;
    }
  }

  return changed;
}
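
// The three shapes handled above, by example (illustrative):
//   %v = load [ss]                                  (no uses)
//       -> delete the load;
//   %v = load [ss]; ... = op %v; store %v -> [ss]   (one non-store use)
//       -> fold the load into the user, then delete the load and stores;
//   %v = load [ss]; store %v -> [ss]                (only store uses)
//       -> delete both the load and the stores.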
/// createsNewJoin - Return true if splitting the given live range across the
/// barrier would create a new join point for the value in the CFG.
bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
                                       MachineBasicBlock* DefMBB,
                                       MachineBasicBlock* BarrierMBB) {
  if (DefMBB == BarrierMBB)
    return false;

  if (LR->valno->hasPHIKill)
    return false;

  unsigned MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
  if (LR->end < MBBEnd)
    return false;

  MachineLoopInfo& MLI = getAnalysis<MachineLoopInfo>();
  if (MLI.getLoopFor(DefMBB) != MLI.getLoopFor(BarrierMBB))
    return true;

  MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
  SmallPtrSet<MachineBasicBlock*, 4> Visited;
  typedef std::pair<MachineBasicBlock*,
                    MachineBasicBlock::succ_iterator> ItPair;
  SmallVector<ItPair, 4> Stack;
  Stack.push_back(std::make_pair(BarrierMBB, BarrierMBB->succ_begin()));

  while (!Stack.empty()) {
    ItPair P = Stack.back();
    Stack.pop_back();

    MachineBasicBlock* PredMBB = P.first;
    MachineBasicBlock::succ_iterator S = P.second;

    if (S == PredMBB->succ_end())
      continue;
    else if (Visited.count(*S)) {
      Stack.push_back(std::make_pair(PredMBB, ++S));
      continue;
    } else
      Stack.push_back(std::make_pair(PredMBB, S+1));

    MachineBasicBlock* MBB = *S;
    Visited.insert(MBB);

    if (MBB == BarrierMBB)
      return true;

    MachineDomTreeNode* DefMDTN = MDT.getNode(DefMBB);
    MachineDomTreeNode* BarrierMDTN = MDT.getNode(BarrierMBB);
    MachineDomTreeNode* MDTN = MDT.getNode(MBB)->getIDom();
    while (MDTN) {
      if (MDTN == DefMDTN)
        return true;
      else if (MDTN == BarrierMDTN)
        break;
      MDTN = MDTN->getIDom();
    }

    MBBEnd = LIs->getMBBEndIdx(MBB);
    if (LR->end > MBBEnd)
      Stack.push_back(std::make_pair(MBB, MBB->succ_begin()));
  }

  return false;
}
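
// Roughly: the DFS above walks blocks reachable from the barrier while the
// value is still live. Reaching the barrier block again, or a block whose
// dominator chain hits DefMBB before BarrierMBB, means the restored copy and
// the original value would meet at a join that did not exist before, so the
// split is rejected.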
bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
  CurrMF = &MF;
  TM     = &MF.getTarget();
  TII    = TM->getInstrInfo();
  MFI    = MF.getFrameInfo();
  MRI    = &MF.getRegInfo();
  LIs    = &getAnalysis<LiveIntervals>();
  LSs    = &getAnalysis<LiveStacks>();

  bool MadeChange = false;

  // Make sure blocks are numbered in order.
  MF.RenumberBlocks();

  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;

  SmallPtrSet<LiveInterval*, 8> Split;

  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    BarrierMBB = *DFI;
    for (MachineBasicBlock::iterator I = BarrierMBB->begin(),
         E = BarrierMBB->end(); I != E; ++I) {
      Barrier = &*I;
      const TargetRegisterClass **BarrierRCs =
        Barrier->getDesc().getRegClassBarriers();
      if (!BarrierRCs)
        continue;
      BarrierIdx = LIs->getInstructionIndex(Barrier);
      MadeChange |= SplitRegLiveIntervals(BarrierRCs, Split);
    }
  }

  MadeChange |= removeDeadSpills(Split);

  return MadeChange;
}