1 //===-- PreAllocSplitting.cpp - Pre-allocation Interval Splitting Pass ----===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the machine instruction level pre-register allocation
11 // live interval splitting pass. It finds live interval barriers, i.e.
12 // instructions which will kill all physical registers in certain register
13 // classes, and splits all live intervals which cross the barrier.
15 //===----------------------------------------------------------------------===//
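//
// For illustration only (a simplified sketch, not exact machine-instruction
// syntax): a live interval that crosses a barrier is split by spilling the
// value before the barrier and restoring it afterwards, and the restored
// value is renumbered into a new virtual register so the allocator can
// assign it independently of the original:
//
//   %v = def ...                           %v = def ...
//   ...                                    store %v -> [ss]     ; spill
//   BARRIER (clobbers %v's reg class)  =>  BARRIER
//   ...                                    %v' = load [ss]      ; restore
//   use %v                                 use %v'
//
//===----------------------------------------------------------------------===//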
17 #define DEBUG_TYPE "pre-alloc-split"
18 #include "VirtRegMap.h"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "llvm/CodeGen/LiveStackAnalysis.h"
21 #include "llvm/CodeGen/MachineDominators.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunctionPass.h"
24 #include "llvm/CodeGen/MachineLoopInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/CodeGen/Passes.h"
27 #include "llvm/CodeGen/RegisterCoalescer.h"
28 #include "llvm/Target/TargetInstrInfo.h"
29 #include "llvm/Target/TargetMachine.h"
30 #include "llvm/Target/TargetOptions.h"
31 #include "llvm/Target/TargetRegisterInfo.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/ADT/DenseMap.h"
36 #include "llvm/ADT/DepthFirstIterator.h"
37 #include "llvm/ADT/SmallPtrSet.h"
38 #include "llvm/ADT/Statistic.h"
41 static cl::opt<int> PreSplitLimit("pre-split-limit", cl::init(-1), cl::Hidden);
42 static cl::opt<int> DeadSplitLimit("dead-split-limit", cl::init(-1), cl::Hidden);
43 static cl::opt<int> RestoreFoldLimit("restore-fold-limit", cl::init(-1), cl::Hidden);
45 STATISTIC(NumSplits, "Number of intervals split");
46 STATISTIC(NumRemats, "Number of intervals split by rematerialization");
47 STATISTIC(NumFolds, "Number of intervals split with spill folding");
48 STATISTIC(NumRestoreFolds, "Number of intervals split with restore folding");
49 STATISTIC(NumRenumbers, "Number of intervals renumbered into new registers");
50 STATISTIC(NumDeadSpills, "Number of dead spills removed");
53 class PreAllocSplitting : public MachineFunctionPass {
54 MachineFunction *CurrMF;
55 const TargetMachine *TM;
56 const TargetInstrInfo *TII;
57 const TargetRegisterInfo* TRI;
58 MachineFrameInfo *MFI;
59 MachineRegisterInfo *MRI;
65 // Barrier - Current barrier being processed.
66 MachineInstr *Barrier;
68 // BarrierMBB - Basic block where the barrier resides.
69 MachineBasicBlock *BarrierMBB;
71 // BarrierIdx - Current barrier index.
74 // CurrLI - Current live interval being split.
77 // CurrSLI - Current stack slot live interval.
78 LiveInterval *CurrSLI;
80 // CurrSValNo - Current val# for the stack slot live interval.
83 // IntervalSSMap - A map from live interval to spill slots.
84 DenseMap<unsigned, int> IntervalSSMap;
86 // Def2SpillMap - A map from a def instruction index to spill index.
87 DenseMap<SlotIndex, SlotIndex> Def2SpillMap;
92 : MachineFunctionPass(&ID) {}
94 virtual bool runOnMachineFunction(MachineFunction &MF);
96 virtual void getAnalysisUsage(AnalysisUsage &AU) const {
98 AU.addRequired<SlotIndexes>();
99 AU.addPreserved<SlotIndexes>();
100 AU.addRequired<LiveIntervals>();
101 AU.addPreserved<LiveIntervals>();
102 AU.addRequired<LiveStacks>();
103 AU.addPreserved<LiveStacks>();
104 AU.addPreserved<RegisterCoalescer>();
106 AU.addPreservedID(StrongPHIEliminationID);
108 AU.addPreservedID(PHIEliminationID);
109 AU.addRequired<MachineDominatorTree>();
110 AU.addRequired<MachineLoopInfo>();
111 AU.addRequired<VirtRegMap>();
112 AU.addPreserved<MachineDominatorTree>();
113 AU.addPreserved<MachineLoopInfo>();
114 AU.addPreserved<VirtRegMap>();
115 MachineFunctionPass::getAnalysisUsage(AU);
118 virtual void releaseMemory() {
119 IntervalSSMap.clear();
120 Def2SpillMap.clear();
123 virtual const char *getPassName() const {
124 return "Pre-Register Allocation Live Interval Splitting";
127 /// print - Implement the dump method.
128 virtual void print(raw_ostream &O, const Module* M = 0) const {
134 MachineBasicBlock::iterator
135 findNextEmptySlot(MachineBasicBlock*, MachineInstr*,
138 MachineBasicBlock::iterator
139 findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
140 SmallPtrSet<MachineInstr*, 4>&, SlotIndex&);
142 MachineBasicBlock::iterator
143 findRestorePoint(MachineBasicBlock*, MachineInstr*, SlotIndex,
144 SmallPtrSet<MachineInstr*, 4>&, SlotIndex&);
146 int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);
148 bool IsAvailableInStack(MachineBasicBlock*, unsigned,
149 SlotIndex, SlotIndex,
150 SlotIndex&, int&) const;
152 void UpdateSpillSlotInterval(VNInfo*, SlotIndex, SlotIndex);
154 bool SplitRegLiveInterval(LiveInterval*);
156 bool SplitRegLiveIntervals(const TargetRegisterClass **,
157 SmallPtrSet<LiveInterval*, 8>&);
159 bool createsNewJoin(LiveRange* LR, MachineBasicBlock* DefMBB,
160 MachineBasicBlock* BarrierMBB);
161 bool Rematerialize(unsigned vreg, VNInfo* ValNo,
163 MachineBasicBlock::iterator RestorePt,
164 SlotIndex RestoreIdx,
165 SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
166 MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
168 MachineInstr* Barrier,
169 MachineBasicBlock* MBB,
171 SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
172 MachineInstr* FoldRestore(unsigned vreg,
173 const TargetRegisterClass* RC,
174 MachineInstr* Barrier,
175 MachineBasicBlock* MBB,
177 SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
178 void RenumberValno(VNInfo* VN);
179 void ReconstructLiveInterval(LiveInterval* LI);
180 bool removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split);
181 unsigned getNumberOfNonSpills(SmallPtrSet<MachineInstr*, 4>& MIs,
182 unsigned Reg, int FrameIndex, bool& TwoAddr);
183 VNInfo* PerformPHIConstruction(MachineBasicBlock::iterator Use,
184 MachineBasicBlock* MBB, LiveInterval* LI,
185 SmallPtrSet<MachineInstr*, 4>& Visited,
186 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
187 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
188 DenseMap<MachineInstr*, VNInfo*>& NewVNs,
189 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
190 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
191 bool IsTopLevel, bool IsIntraBlock);
192 VNInfo* PerformPHIConstructionFallBack(MachineBasicBlock::iterator Use,
193 MachineBasicBlock* MBB, LiveInterval* LI,
194 SmallPtrSet<MachineInstr*, 4>& Visited,
195 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
196 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
197 DenseMap<MachineInstr*, VNInfo*>& NewVNs,
198 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
199 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
200 bool IsTopLevel, bool IsIntraBlock);
202 } // end anonymous namespace
204 char PreAllocSplitting::ID = 0;
206 static RegisterPass<PreAllocSplitting>
207 X("pre-alloc-splitting", "Pre-Register Allocation Live Interval Splitting");
209 const PassInfo *const llvm::PreAllocSplittingID = &X;
212 /// findNextEmptySlot - Find a gap after the given machine instruction in the
213 /// instruction index map. If there isn't one, return end().
214 MachineBasicBlock::iterator
215 PreAllocSplitting::findNextEmptySlot(MachineBasicBlock *MBB, MachineInstr *MI,
216 SlotIndex &SpotIndex) {
217 MachineBasicBlock::iterator MII = MI;
218 if (++MII != MBB->end()) {
220 LIs->findGapBeforeInstr(LIs->getInstructionIndex(MII));
221 if (Index != SlotIndex()) {
229 /// findSpillPoint - Find a gap as far away from the given MI as possible that
230 /// is suitable for spilling the current live interval. The index must be
231 /// before any defs and uses of the live interval register in the mbb. Return
232 /// begin() if none is found.
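/// For example (an illustrative sketch only): when the barrier is a call, the
/// spill may not be placed between the call and its call frame setup
/// pseudo-instruction, so the backwards walk from the barrier first skips the
/// whole call sequence before looking for a gap:
///
///   ...                      <- candidate spill gaps (searched here)
///   <call frame setup>       <- skipping stops here
///   ...argument setup        <- skipped
///   CALL (the barrier)       <- backwards walk starts here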
233 MachineBasicBlock::iterator
234 PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
236 SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
237 SlotIndex &SpillIndex) {
238 MachineBasicBlock::iterator Pt = MBB->begin();
240 MachineBasicBlock::iterator MII = MI;
241 MachineBasicBlock::iterator EndPt = DefMI
242 ? MachineBasicBlock::iterator(DefMI) : MBB->begin();
244 while (MII != EndPt && !RefsInMBB.count(MII) &&
245 MII->getOpcode() != TRI->getCallFrameSetupOpcode())
247 if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
249 while (MII != EndPt && !RefsInMBB.count(MII)) {
250 SlotIndex Index = LIs->getInstructionIndex(MII);
252 // We can't insert the spill between the barrier (a call) and its
253 // corresponding call frame setup.
254 if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
255 while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
262 } else if (LIs->hasGapBeforeInstr(Index)) {
264 SpillIndex = LIs->findGapBeforeInstr(Index, true);
267 if (RefsInMBB.count(MII))
277 /// findRestorePoint - Find a gap in the instruction index map that's suitable
278 /// for restoring the current live interval value. The index must be before any
279 /// uses of the live interval register in the mbb. Return end() if none is
280 /// found.
281 MachineBasicBlock::iterator
282 PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
284 SmallPtrSet<MachineInstr*, 4> &RefsInMBB,
285 SlotIndex &RestoreIndex) {
286 // FIXME: Allow the spill to be inserted at the beginning of the mbb. Update mbb
287 // begin index accordingly.
288 MachineBasicBlock::iterator Pt = MBB->end();
289 MachineBasicBlock::iterator EndPt = MBB->getFirstTerminator();
291 // We start at the call, so walk forward until we find the call frame teardown
292 // since we can't insert restores before that. Bail if we encounter a use
293 // of the register first.
294 MachineBasicBlock::iterator MII = MI;
295 if (MII == EndPt) return Pt;
297 while (MII != EndPt && !RefsInMBB.count(MII) &&
298 MII->getOpcode() != TRI->getCallFrameDestroyOpcode())
300 if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
303 // FIXME: Limit the number of instructions to examine to reduce compile time.
305 while (MII != EndPt) {
306 SlotIndex Index = LIs->getInstructionIndex(MII);
309 SlotIndex Gap = LIs->findGapBeforeInstr(Index);
311 // We can't insert a restore between the barrier (a call) and its
312 // corresponding call frame teardown.
313 if (MII->getOpcode() == TRI->getCallFrameSetupOpcode()) {
315 if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
317 } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
318 } else if (Gap != SlotIndex()) {
323 if (RefsInMBB.count(MII))
332 /// CreateSpillStackSlot - Create a stack slot for the live interval being
333 /// split. If the live interval was previously split, just reuse the same
334 /// spill slot.
335 int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
336 const TargetRegisterClass *RC) {
338 DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
339 if (I != IntervalSSMap.end()) {
342 SS = MFI->CreateStackObject(RC->getSize(), RC->getAlignment());
343 IntervalSSMap[Reg] = SS;
346 // Create live interval for stack slot.
347 CurrSLI = &LSs->getOrCreateInterval(SS, RC);
348 if (CurrSLI->hasAtLeastOneValue())
349 CurrSValNo = CurrSLI->getValNumInfo(0);
351 CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
352 LSs->getVNInfoAllocator());
356 /// IsAvailableInStack - Return true if register is available in a split stack
357 /// slot at the specified index.
359 PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
360 unsigned Reg, SlotIndex DefIndex,
361 SlotIndex RestoreIndex,
362 SlotIndex &SpillIndex,
367 DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
368 if (I == IntervalSSMap.end())
370 DenseMap<SlotIndex, SlotIndex>::iterator
371 II = Def2SpillMap.find(DefIndex);
372 if (II == Def2SpillMap.end())
375 // If the last spill of the def is in the same mbb as the barrier mbb (where the
376 // restore will be), make sure it's not at or after the intended restore index.
377 // FIXME: Undo the previous spill?
378 assert(LIs->getMBBFromIndex(II->second) == DefMBB);
379 if (DefMBB == BarrierMBB && II->second >= RestoreIndex)
383 SpillIndex = II->second;
387 /// UpdateSpillSlotInterval - Given the specified val# of the register live
388 /// interval being split, and the spill and restore indices, update the live
389 /// interval of the spill stack slot.
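///
/// Roughly (an illustrative summary of the code below, not extra behaviour):
/// the stack slot becomes live from the spill index to the restore index. If
/// the spill and the restore are in the same block, that is a single range;
/// otherwise the slot is marked live to the end of the spill block and then
/// propagated along successor blocks where the register's value is live,
/// stopping at the block containing the restore.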
391 PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, SlotIndex SpillIndex,
392 SlotIndex RestoreIndex) {
393 assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
394 "Expect restore in the barrier mbb");
396 MachineBasicBlock *MBB = LIs->getMBBFromIndex(SpillIndex);
397 if (MBB == BarrierMBB) {
398 // Intra-block spill + restore. We are done.
399 LiveRange SLR(SpillIndex, RestoreIndex, CurrSValNo);
400 CurrSLI->addRange(SLR);
404 SmallPtrSet<MachineBasicBlock*, 4> Processed;
405 SlotIndex EndIdx = LIs->getMBBEndIdx(MBB);
406 LiveRange SLR(SpillIndex, EndIdx.getNextSlot(), CurrSValNo);
407 CurrSLI->addRange(SLR);
408 Processed.insert(MBB);
410 // Start from the spill mbb, figure out the extent of the spill slot's
411 // live interval.
412 SmallVector<MachineBasicBlock*, 4> WorkList;
413 const LiveRange *LR = CurrLI->getLiveRangeContaining(SpillIndex);
414 if (LR->end > EndIdx)
415 // If live range extends beyond end of mbb, add successors to work list.
416 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
417 SE = MBB->succ_end(); SI != SE; ++SI)
418 WorkList.push_back(*SI);
420 while (!WorkList.empty()) {
421 MachineBasicBlock *MBB = WorkList.back();
423 if (Processed.count(MBB))
425 SlotIndex Idx = LIs->getMBBStartIdx(MBB);
426 LR = CurrLI->getLiveRangeContaining(Idx);
427 if (LR && LR->valno == ValNo) {
428 EndIdx = LIs->getMBBEndIdx(MBB);
429 if (Idx <= RestoreIndex && RestoreIndex < EndIdx) {
430 // Spill slot live interval stops at the restore.
431 LiveRange SLR(Idx, RestoreIndex, CurrSValNo);
432 CurrSLI->addRange(SLR);
433 } else if (LR->end > EndIdx) {
434 // Live range extends beyond end of mbb, process successors.
435 LiveRange SLR(Idx, EndIdx.getNextIndex(), CurrSValNo);
436 CurrSLI->addRange(SLR);
437 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
438 SE = MBB->succ_end(); SI != SE; ++SI)
439 WorkList.push_back(*SI);
441 LiveRange SLR(Idx, LR->end, CurrSValNo);
442 CurrSLI->addRange(SLR);
444 Processed.insert(MBB);
449 /// PerformPHIConstruction - From properly set up use and def lists, use a PHI
450 /// construction algorithm to compute the ranges and valnos for an interval.
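///
/// Illustrative summary (not additional behaviour): starting from a use, the
/// search walks backwards within the block for the nearest preceding def or
/// use; if none is found before the top of the block, the fallback path
/// creates a value at the block entry and recursively merges the values
/// reaching it from each predecessor, in the manner of SSA PHI construction.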
452 PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
453 MachineBasicBlock* MBB, LiveInterval* LI,
454 SmallPtrSet<MachineInstr*, 4>& Visited,
455 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
456 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
457 DenseMap<MachineInstr*, VNInfo*>& NewVNs,
458 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
459 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
460 bool IsTopLevel, bool IsIntraBlock) {
461 // Return memoized result if it's available.
462 if (IsTopLevel && Visited.count(UseI) && NewVNs.count(UseI))
464 else if (!IsTopLevel && IsIntraBlock && NewVNs.count(UseI))
466 else if (!IsIntraBlock && LiveOut.count(MBB))
469 // Check if our block contains any uses or defs.
470 bool ContainsDefs = Defs.count(MBB);
471 bool ContainsUses = Uses.count(MBB);
475 // Enumerate the cases of use/def containing blocks.
476 if (!ContainsDefs && !ContainsUses) {
477 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
478 NewVNs, LiveOut, Phis,
479 IsTopLevel, IsIntraBlock);
480 } else if (ContainsDefs && !ContainsUses) {
481 SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
483 // Search for the def in this block. If we don't find it before the
484 // instruction we care about, go to the fallback case. Note that that
485 // should never happen: this cannot be intrablock, so UseI should
486 // always be an end() iterator.
487 assert(UseI == MBB->end() && "No use marked in intrablock");
489 MachineBasicBlock::iterator Walker = UseI;
491 while (Walker != MBB->begin()) {
492 if (BlockDefs.count(Walker))
497 // Once we've found it, extend its VNInfo to our instruction.
498 SlotIndex DefIndex = LIs->getInstructionIndex(Walker);
499 DefIndex = DefIndex.getDefIndex();
500 SlotIndex EndIndex = LIs->getMBBEndIdx(MBB);
502 RetVNI = NewVNs[Walker];
503 LI->addRange(LiveRange(DefIndex, EndIndex.getNextSlot(), RetVNI));
504 } else if (!ContainsDefs && ContainsUses) {
505 SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
507 // Search for the use in this block that precedes the instruction we care
508 // about, going to the fallback case if we don't find it.
509 if (UseI == MBB->begin())
510 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
511 Uses, NewVNs, LiveOut, Phis,
512 IsTopLevel, IsIntraBlock);
514 MachineBasicBlock::iterator Walker = UseI;
517 while (Walker != MBB->begin()) {
518 if (BlockUses.count(Walker)) {
525 // Must check begin() too.
527 if (BlockUses.count(Walker))
530 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
531 Uses, NewVNs, LiveOut, Phis,
532 IsTopLevel, IsIntraBlock);
535 SlotIndex UseIndex = LIs->getInstructionIndex(Walker);
536 UseIndex = UseIndex.getUseIndex();
539 EndIndex = LIs->getInstructionIndex(UseI);
540 EndIndex = EndIndex.getUseIndex();
542 EndIndex = LIs->getMBBEndIdx(MBB);
544 // Now, recursively perform PHI construction for the VNInfo of the use we
545 // found, and then extend it to include the instruction we care about.
546 RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
547 NewVNs, LiveOut, Phis, false, true);
549 LI->addRange(LiveRange(UseIndex, EndIndex.getNextSlot(), RetVNI));
551 // FIXME: Need to set kills properly for inter-block stuff.
552 if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex);
554 RetVNI->addKill(EndIndex);
555 } else if (ContainsDefs && ContainsUses) {
556 SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
557 SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];
559 // This case is basically a merging of the two preceding cases, with the
560 // special note that checking for defs must take precedence over checking
561 // for uses, because of two-address instructions.
563 if (UseI == MBB->begin())
564 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
565 NewVNs, LiveOut, Phis,
566 IsTopLevel, IsIntraBlock);
568 MachineBasicBlock::iterator Walker = UseI;
570 bool foundDef = false;
571 bool foundUse = false;
572 while (Walker != MBB->begin()) {
573 if (BlockDefs.count(Walker)) {
576 } else if (BlockUses.count(Walker)) {
583 // Must check begin() too.
584 if (!foundDef && !foundUse) {
585 if (BlockDefs.count(Walker))
587 else if (BlockUses.count(Walker))
590 return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
591 Uses, NewVNs, LiveOut, Phis,
592 IsTopLevel, IsIntraBlock);
595 SlotIndex StartIndex = LIs->getInstructionIndex(Walker);
596 StartIndex = foundDef ? StartIndex.getDefIndex() : StartIndex.getUseIndex();
599 EndIndex = LIs->getInstructionIndex(UseI);
600 EndIndex = EndIndex.getUseIndex();
602 EndIndex = LIs->getMBBEndIdx(MBB);
605 RetVNI = NewVNs[Walker];
607 RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
608 NewVNs, LiveOut, Phis, false, true);
610 LI->addRange(LiveRange(StartIndex, EndIndex.getNextSlot(), RetVNI));
612 if (foundUse && RetVNI->isKill(StartIndex))
613 RetVNI->removeKill(StartIndex);
615 RetVNI->addKill(EndIndex);
619 // Memoize results so we don't have to recompute them.
620 if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
622 if (!NewVNs.count(UseI))
623 NewVNs[UseI] = RetVNI;
624 Visited.insert(UseI);
630 /// PerformPHIConstructionFallBack - PerformPHIConstruction's fallback path.
633 PreAllocSplitting::PerformPHIConstructionFallBack(MachineBasicBlock::iterator UseI,
634 MachineBasicBlock* MBB, LiveInterval* LI,
635 SmallPtrSet<MachineInstr*, 4>& Visited,
636 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
637 DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
638 DenseMap<MachineInstr*, VNInfo*>& NewVNs,
639 DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
640 DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
641 bool IsTopLevel, bool IsIntraBlock) {
642 // NOTE: Because this is the fallback case from other cases, we do NOT
643 // assume that we are not intrablock here.
644 if (Phis.count(MBB)) return Phis[MBB];
646 SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
647 VNInfo *RetVNI = Phis[MBB] =
648 LI->getNextValue(SlotIndex(), /*FIXME*/ 0, false,
649 LIs->getVNInfoAllocator());
651 if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
653 // If there are no uses or defs between our starting point and the
654 // beginning of the block, then recursively perform PHI construction
655 // on our predecessors.
656 DenseMap<MachineBasicBlock*, VNInfo*> IncomingVNs;
657 for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
658 PE = MBB->pred_end(); PI != PE; ++PI) {
659 VNInfo* Incoming = PerformPHIConstruction((*PI)->end(), *PI, LI,
660 Visited, Defs, Uses, NewVNs,
661 LiveOut, Phis, false, false);
663 IncomingVNs[*PI] = Incoming;
666 if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) {
667 VNInfo* OldVN = RetVNI;
668 VNInfo* NewVN = IncomingVNs.begin()->second;
669 VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN);
670 if (MergedVN == OldVN) std::swap(OldVN, NewVN);
672 for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator LOI = LiveOut.begin(),
673 LOE = LiveOut.end(); LOI != LOE; ++LOI)
674 if (LOI->second == OldVN)
675 LOI->second = MergedVN;
676 for (DenseMap<MachineInstr*, VNInfo*>::iterator NVI = NewVNs.begin(),
677 NVE = NewVNs.end(); NVI != NVE; ++NVI)
678 if (NVI->second == OldVN)
679 NVI->second = MergedVN;
680 for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator PI = Phis.begin(),
681 PE = Phis.end(); PI != PE; ++PI)
682 if (PI->second == OldVN)
683 PI->second = MergedVN;
686 // Otherwise, merge the incoming VNInfos with a phi join. Create a new
687 // VNInfo to represent the joined value.
688 for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
689 IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
690 I->second->setHasPHIKill(true);
691 SlotIndex KillIndex = LIs->getMBBEndIdx(I->first);
692 if (!I->second->isKill(KillIndex))
693 I->second->addKill(KillIndex);
699 EndIndex = LIs->getInstructionIndex(UseI);
700 EndIndex = EndIndex.getUseIndex();
702 EndIndex = LIs->getMBBEndIdx(MBB);
703 LI->addRange(LiveRange(StartIndex, EndIndex.getNextSlot(), RetVNI));
705 RetVNI->addKill(EndIndex);
707 // Memoize results so we don't have to recompute them.
709 LiveOut[MBB] = RetVNI;
711 if (!NewVNs.count(UseI))
712 NewVNs[UseI] = RetVNI;
713 Visited.insert(UseI);
719 /// ReconstructLiveInterval - Recompute a live interval from scratch.
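///
/// A sketch of the steps performed below: clear the existing ranges and value
/// numbers, create a fresh VNInfo for every def of the register, run the PHI
/// construction walk from every use to rebuild the ranges, add single-slot
/// ranges for dead defs, and finally recompute the kill markers.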
720 void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
721 BumpPtrAllocator& Alloc = LIs->getVNInfoAllocator();
723 // Clear the old ranges and valnos.
726 // Cache the uses and defs of the register
727 typedef DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> > RegMap;
730 // Keep track of the new VNs we're creating.
731 DenseMap<MachineInstr*, VNInfo*> NewVNs;
732 SmallPtrSet<VNInfo*, 2> PhiVNs;
734 // Cache defs, and create a new VNInfo for each def.
735 for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
736 DE = MRI->def_end(); DI != DE; ++DI) {
737 Defs[(*DI).getParent()].insert(&*DI);
739 SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
740 DefIdx = DefIdx.getDefIndex();
742 assert(DI->getOpcode() != TargetInstrInfo::PHI &&
743 "Following NewVN isPHIDef flag incorrect. Fix me!");
744 VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc);
746 // If the def is a move, set the copy field.
747 unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
748 if (TII->isMoveInstr(*DI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
749 if (DstReg == LI->reg)
750 NewVN->setCopy(&*DI);
752 NewVNs[&*DI] = NewVN;
755 // Cache uses as a separate pass from actually processing them.
756 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
757 UE = MRI->use_end(); UI != UE; ++UI)
758 Uses[(*UI).getParent()].insert(&*UI);
760 // Now, actually process every use and use a phi construction algorithm
761 // to walk from it to its reaching definitions, building VNInfos along
762 // the way.
763 DenseMap<MachineBasicBlock*, VNInfo*> LiveOut;
764 DenseMap<MachineBasicBlock*, VNInfo*> Phis;
765 SmallPtrSet<MachineInstr*, 4> Visited;
766 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
767 UE = MRI->use_end(); UI != UE; ++UI) {
768 PerformPHIConstruction(&*UI, UI->getParent(), LI, Visited, Defs,
769 Uses, NewVNs, LiveOut, Phis, true, true);
772 // Add ranges for dead defs
773 for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
774 DE = MRI->def_end(); DI != DE; ++DI) {
775 SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
776 DefIdx = DefIdx.getDefIndex();
778 if (LI->liveAt(DefIdx)) continue;
780 VNInfo* DeadVN = NewVNs[&*DI];
781 LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
782 DeadVN->addKill(DefIdx);
785 // Update kill markers.
786 for (LiveInterval::vni_iterator VI = LI->vni_begin(), VE = LI->vni_end();
789 for (unsigned i = 0, e = VNI->kills.size(); i != e; ++i) {
790 SlotIndex KillIdx = VNI->kills[i];
793 MachineInstr *KillMI = LIs->getInstructionFromIndex(KillIdx);
795 MachineOperand *KillMO = KillMI->findRegisterUseOperand(CurrLI->reg);
797 // It could be a dead def.
804 /// RenumberValno - Split the given valno out into a new vreg, allowing it to
805 /// be allocated to a different register. This function creates a new vreg,
806 /// copies the valno and its live ranges over to the new vreg's interval,
807 /// removes them from the old interval, and rewrites all uses and defs of
808 /// the original reg to the new vreg within those ranges.
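///
/// For example (register numbers are made up for illustration): if the
/// restored value, together with any two-address redefinitions chained from
/// it, forms value number %1 of %reg1024, that value number is copied into a
/// fresh interval for a new register such as %reg2048, removed from
/// %reg1024's interval, and every operand inside the copied ranges is
/// rewritten from %reg1024 to %reg2048.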
809 void PreAllocSplitting::RenumberValno(VNInfo* VN) {
810 SmallVector<VNInfo*, 4> Stack;
811 SmallVector<VNInfo*, 4> VNsToCopy;
814 // Walk through and copy the valno we care about, and any other valnos
815 // that are two-address redefinitions of the one we care about. These
816 // will need to be rewritten as well. We also check for safety of the
817 // renumbering here, by making sure that none of the valnos involved has
818 // a PHI kill.
819 while (!Stack.empty()) {
820 VNInfo* OldVN = Stack.back();
823 // Bail out if we ever encounter a valno that has a PHI kill. We can't
824 // renumber it.
825 if (OldVN->hasPHIKill()) return;
827 VNsToCopy.push_back(OldVN);
829 // Locate two-address redefinitions
830 for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
831 KE = OldVN->kills.end(); KI != KE; ++KI) {
832 assert(!KI->isPHI() &&
833 "VN previously reported having no PHI kills.");
834 MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
835 unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
836 if (DefIdx == ~0U) continue;
837 if (MI->isRegTiedToUseOperand(DefIdx)) {
839 CurrLI->findDefinedVNInfoForRegInt(KI->getDefIndex());
840 if (NextVN == OldVN) continue;
841 Stack.push_back(NextVN);
846 // Create the new vreg
847 unsigned NewVReg = MRI->createVirtualRegister(MRI->getRegClass(CurrLI->reg));
849 // Create the new live interval
850 LiveInterval& NewLI = LIs->getOrCreateInterval(NewVReg);
852 for (SmallVector<VNInfo*, 4>::iterator OI = VNsToCopy.begin(), OE =
853 VNsToCopy.end(); OI != OE; ++OI) {
856 // Copy the valno over
857 VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator());
858 NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);
860 // Remove the valno from the old interval
861 CurrLI->removeValNo(OldVN);
864 // Rewrite defs and uses. This is done in two stages to avoid invalidating
865 // the reg_iterator.
866 SmallVector<std::pair<MachineInstr*, unsigned>, 8> OpsToChange;
868 for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
869 E = MRI->reg_end(); I != E; ++I) {
870 MachineOperand& MO = I.getOperand();
871 SlotIndex InstrIdx = LIs->getInstructionIndex(&*I);
873 if ((MO.isUse() && NewLI.liveAt(InstrIdx.getUseIndex())) ||
874 (MO.isDef() && NewLI.liveAt(InstrIdx.getDefIndex())))
875 OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
878 for (SmallVector<std::pair<MachineInstr*, unsigned>, 8>::iterator I =
879 OpsToChange.begin(), E = OpsToChange.end(); I != E; ++I) {
880 MachineInstr* Inst = I->first;
881 unsigned OpIdx = I->second;
882 MachineOperand& MO = Inst->getOperand(OpIdx);
886 // Grow the VirtRegMap, since we've created a new vreg.
889 // The renumbered vreg shares a stack slot with the old register.
890 if (IntervalSSMap.count(CurrLI->reg))
891 IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];
896 bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
898 MachineBasicBlock::iterator RestorePt,
899 SlotIndex RestoreIdx,
900 SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
901 MachineBasicBlock& MBB = *RestorePt->getParent();
903 MachineBasicBlock::iterator KillPt = BarrierMBB->end();
905 if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
906 KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, KillIdx);
908 KillPt = findNextEmptySlot(DefMI->getParent(), DefMI, KillIdx);
910 if (KillPt == DefMI->getParent()->end())
913 TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI);
914 LIs->InsertMachineInstrInMaps(prior(RestorePt), RestoreIdx);
916 ReconstructLiveInterval(CurrLI);
917 SlotIndex RematIdx = LIs->getInstructionIndex(prior(RestorePt));
918 RematIdx = RematIdx.getDefIndex();
919 RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RematIdx));
926 MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
927 const TargetRegisterClass* RC,
929 MachineInstr* Barrier,
930 MachineBasicBlock* MBB,
932 SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
933 MachineBasicBlock::iterator Pt = MBB->begin();
935 // Go top down if RefsInMBB is empty.
936 if (RefsInMBB.empty())
939 MachineBasicBlock::iterator FoldPt = Barrier;
940 while (&*FoldPt != DefMI && FoldPt != MBB->begin() &&
941 !RefsInMBB.count(FoldPt))
944 int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg, false);
948 SmallVector<unsigned, 1> Ops;
949 Ops.push_back(OpIdx);
951 if (!TII->canFoldMemoryOperand(FoldPt, Ops))
954 DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(vreg);
955 if (I != IntervalSSMap.end()) {
958 SS = MFI->CreateStackObject(RC->getSize(), RC->getAlignment());
961 MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
965 LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
966 FMI = MBB->insert(MBB->erase(FoldPt), FMI);
969 IntervalSSMap[vreg] = SS;
970 CurrSLI = &LSs->getOrCreateInterval(SS, RC);
971 if (CurrSLI->hasAtLeastOneValue())
972 CurrSValNo = CurrSLI->getValNumInfo(0);
974 CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
975 LSs->getVNInfoAllocator());
981 MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
982 const TargetRegisterClass* RC,
983 MachineInstr* Barrier,
984 MachineBasicBlock* MBB,
986 SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
987 if ((int)RestoreFoldLimit != -1 && RestoreFoldLimit == (int)NumRestoreFolds)
990 // Go top down if RefsInMBB is empty.
991 if (RefsInMBB.empty())
994 // Can't fold a restore between a call frame setup and teardown.
995 MachineBasicBlock::iterator FoldPt = Barrier;
997 // Advance from barrier to call frame teardown.
998 while (FoldPt != MBB->getFirstTerminator() &&
999 FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
1000 if (RefsInMBB.count(FoldPt))
1006 if (FoldPt == MBB->getFirstTerminator())
1011 // Now find the restore point.
1012 while (FoldPt != MBB->getFirstTerminator() && !RefsInMBB.count(FoldPt)) {
1013 if (FoldPt->getOpcode() == TRI->getCallFrameSetupOpcode()) {
1014 while (FoldPt != MBB->getFirstTerminator() &&
1015 FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
1016 if (RefsInMBB.count(FoldPt))
1022 if (FoldPt == MBB->getFirstTerminator())
1029 if (FoldPt == MBB->getFirstTerminator())
1032 int OpIdx = FoldPt->findRegisterUseOperandIdx(vreg, true);
1036 SmallVector<unsigned, 1> Ops;
1037 Ops.push_back(OpIdx);
1039 if (!TII->canFoldMemoryOperand(FoldPt, Ops))
1042 MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
1046 LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
1047 FMI = MBB->insert(MBB->erase(FoldPt), FMI);
1054 /// SplitRegLiveInterval - Split (spill and restore) the given live interval
1055 /// so that it does not cross the barrier being processed. Shrink-wrap
1056 /// (minimize) the live interval to the last uses.
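///
/// An illustrative summary of the strategy below: first try to rematerialize
/// the value after the barrier; failing that, insert (or fold) a spill either
/// just after the def or just before the barrier, insert (or fold) a restore
/// after the barrier, update the spill slot's live interval, reconstruct the
/// register's live interval, and renumber the restored value into a new vreg.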
1057 bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
1058 DEBUG(errs() << "Pre-alloc splitting " << LI->reg << " for " << *Barrier
1063 // Find the live range where the current interval crosses the barrier.
1064 LiveInterval::iterator LR =
1065 CurrLI->FindLiveRangeContaining(BarrierIdx.getUseIndex());
1066 VNInfo *ValNo = LR->valno;
1068 assert(!ValNo->isUnused() && "Val# is defined by a dead def?");
1070 MachineInstr *DefMI = ValNo->isDefAccurate()
1071 ? LIs->getInstructionFromIndex(ValNo->def) : NULL;
1073 // If this would create a new join point, do not split.
1074 if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
1075 DEBUG(errs() << "FAILED (would create a new join point).\n");
1079 // Find all references in the barrier mbb.
1080 SmallPtrSet<MachineInstr*, 4> RefsInMBB;
1081 for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
1082 E = MRI->reg_end(); I != E; ++I) {
1083 MachineInstr *RefMI = &*I;
1084 if (RefMI->getParent() == BarrierMBB)
1085 RefsInMBB.insert(RefMI);
1088 // Find a point to restore the value after the barrier.
1089 SlotIndex RestoreIndex;
1090 MachineBasicBlock::iterator RestorePt =
1091 findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB, RestoreIndex);
1092 if (RestorePt == BarrierMBB->end()) {
1093 DEBUG(errs() << "FAILED (could not find a suitable restore point).\n");
1097 if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
1098 if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt,
1099 RestoreIndex, RefsInMBB)) {
1100 DEBUG(errs() << "success (remat).\n");
1104 // Add a spill either before the barrier or after the definition.
1105 MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
1106 const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
1107 SlotIndex SpillIndex;
1108 MachineInstr *SpillMI = NULL;
1110 if (!ValNo->isDefAccurate()) {
1111 // If we don't know where the def is we must split just before the barrier.
1112 if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
1113 BarrierMBB, SS, RefsInMBB))) {
1114 SpillIndex = LIs->getInstructionIndex(SpillMI);
1116 MachineBasicBlock::iterator SpillPt =
1117 findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB, SpillIndex);
1118 if (SpillPt == BarrierMBB->begin()) {
1119 DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
1120 return false; // No gap to insert spill.
1124 SS = CreateSpillStackSlot(CurrLI->reg, RC);
1125 TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC);
1126 SpillMI = prior(SpillPt);
1127 LIs->InsertMachineInstrInMaps(SpillMI, SpillIndex);
1129 } else if (!IsAvailableInStack(DefMBB, CurrLI->reg, ValNo->def,
1130 RestoreIndex, SpillIndex, SS)) {
1131 // If it's already split, just restore the value. There is no need to spill
1132 // the def again.
1134 DEBUG(errs() << "FAILED (def is dead).\n");
1135 return false; // Def is dead. Do nothing.
1138 if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
1139 BarrierMBB, SS, RefsInMBB))) {
1140 SpillIndex = LIs->getInstructionIndex(SpillMI);
1142 // Check if it's possible to insert a spill after the def MI.
1143 MachineBasicBlock::iterator SpillPt;
1144 if (DefMBB == BarrierMBB) {
1145 // Add spill after the def and the last use before the barrier.
1146 SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
1147 RefsInMBB, SpillIndex);
1148 if (SpillPt == DefMBB->begin()) {
1149 DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
1150 return false; // No gap to insert spill.
1153 SpillPt = findNextEmptySlot(DefMBB, DefMI, SpillIndex);
1154 if (SpillPt == DefMBB->end()) {
1155 DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
1156 return false; // No gap to insert spill.
1160 SS = CreateSpillStackSlot(CurrLI->reg, RC);
1161 TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg, false, SS, RC);
1162 SpillMI = prior(SpillPt);
1163 LIs->InsertMachineInstrInMaps(SpillMI, SpillIndex);
1167 // Remember def instruction index to spill index mapping.
1168 if (DefMI && SpillMI)
1169 Def2SpillMap[ValNo->def] = SpillIndex;
1172 bool FoldedRestore = false;
1173 if (MachineInstr* LMI = FoldRestore(CurrLI->reg, RC, Barrier,
1174 BarrierMBB, SS, RefsInMBB)) {
1176 RestoreIndex = LIs->getInstructionIndex(RestorePt);
1177 FoldedRestore = true;
1179 TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC);
1180 MachineInstr *LoadMI = prior(RestorePt);
1181 LIs->InsertMachineInstrInMaps(LoadMI, RestoreIndex);
1184 // Update spill stack slot live interval.
1185 UpdateSpillSlotInterval(ValNo, SpillIndex.getUseIndex().getNextSlot(),
1186 RestoreIndex.getDefIndex());
1188 ReconstructLiveInterval(CurrLI);
1190 if (!FoldedRestore) {
1191 SlotIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
1192 RestoreIdx = RestoreIdx.getDefIndex();
1193 RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RestoreIdx));
1197 DEBUG(errs() << "success.\n");
1201 /// SplitRegLiveIntervals - Split all register live intervals that cross the
1202 /// barrier that's being processed.
1204 PreAllocSplitting::SplitRegLiveIntervals(const TargetRegisterClass **RCs,
1205 SmallPtrSet<LiveInterval*, 8>& Split) {
1206 // First find all the virtual registers whose live intervals are intercepted
1207 // by the current barrier.
1208 SmallVector<LiveInterval*, 8> Intervals;
1209 for (const TargetRegisterClass **RC = RCs; *RC; ++RC) {
1210 // FIXME: If it's not safe to move any instruction that defines the barrier
1211 // register class, then it means there are some special dependencies which
1212 // codegen is not modelling. Ignore these barriers for now.
1213 if (!TII->isSafeToMoveRegClassDefs(*RC))
1215 std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
1216 for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
1217 unsigned Reg = VRs[i];
1218 if (!LIs->hasInterval(Reg))
1220 LiveInterval *LI = &LIs->getInterval(Reg);
1221 if (LI->liveAt(BarrierIdx) && !Barrier->readsRegister(Reg))
1222 // Virtual register live interval is intercepted by the barrier. We
1223 // should split and shrink wrap its interval if possible.
1224 Intervals.push_back(LI);
1228 // Process the affected live intervals.
1229 bool Change = false;
1230 while (!Intervals.empty()) {
1231 if (PreSplitLimit != -1 && (int)NumSplits == PreSplitLimit)
1233 LiveInterval *LI = Intervals.back();
1234 Intervals.pop_back();
1235 bool result = SplitRegLiveInterval(LI);
1236 if (result) Split.insert(LI);
1243 unsigned PreAllocSplitting::getNumberOfNonSpills(
1244 SmallPtrSet<MachineInstr*, 4>& MIs,
1245 unsigned Reg, int FrameIndex,
1246 bool& FeedsTwoAddr) {
1247 unsigned NonSpills = 0;
1248 for (SmallPtrSet<MachineInstr*, 4>::iterator UI = MIs.begin(), UE = MIs.end();
1250 int StoreFrameIndex;
1251 unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
1252 if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
1255 int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
1256 if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
1257 FeedsTwoAddr = true;
1263 /// removeDeadSpills - After doing splitting, filter through all intervals we've
1264 /// split, and see if any of the spills are unnecessary. If so, remove them.
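///
/// The patterns handled below, roughly: a restore whose value has no uses at
/// all is deleted; a restore with exactly one non-store use that can absorb
/// the memory operand is folded into that use; and a restore whose uses are
/// all stores back to the same stack slot is deleted together with those
/// stores.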
1265 bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
1266 bool changed = false;
1268 // Walk over all of the live intervals that were touched by the splitter,
1269 // and see if we can do any DCE and/or folding.
1270 for (SmallPtrSet<LiveInterval*, 8>::iterator LI = split.begin(),
1271 LE = split.end(); LI != LE; ++LI) {
1272 DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> > VNUseCount;
1274 // First, collect all the uses of the vreg, and sort them by their
1275 // reaching definition (VNInfo).
1276 for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
1277 UE = MRI->use_end(); UI != UE; ++UI) {
1278 SlotIndex index = LIs->getInstructionIndex(&*UI);
1279 index = index.getUseIndex();
1281 const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
1282 VNUseCount[LR->valno].insert(&*UI);
1285 // Now, take the definitions (VNInfos) one at a time and try to DCE
1286 // and/or fold them away.
1287 for (LiveInterval::vni_iterator VI = (*LI)->vni_begin(),
1288 VE = (*LI)->vni_end(); VI != VE; ++VI) {
1290 if (DeadSplitLimit != -1 && (int)NumDeadSpills == DeadSplitLimit)
1293 VNInfo* CurrVN = *VI;
1295 // We don't currently try to handle definitions with PHI kills, because
1296 // it would involve processing more than one VNInfo at once.
1297 if (CurrVN->hasPHIKill()) continue;
1299 // We also don't try to handle the results of PHI joins, since there's
1300 // no defining instruction to analyze.
1301 if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue;
1303 // We're only interested in eliminating cruft introduced by the splitter,
1304 // which is of the form load-use or load-use-store. First, check that the
1305 // definition is a load, and remember what stack slot we loaded it from.
1306 MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
1308 if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;
1310 // If the definition has no uses at all, just DCE it.
1311 if (VNUseCount[CurrVN].size() == 0) {
1312 LIs->RemoveMachineInstrFromMaps(DefMI);
1313 (*LI)->removeValNo(CurrVN);
1314 DefMI->eraseFromParent();
1315 VNUseCount.erase(CurrVN);
1321 // Second, get the number of non-store uses of the definition, as well as
1322 // a flag indicating whether it feeds into a later two-address definition.
1323 bool FeedsTwoAddr = false;
1324 unsigned NonSpillCount = getNumberOfNonSpills(VNUseCount[CurrVN],
1325 (*LI)->reg, FrameIndex,
1328 // If there's one non-store use and it doesn't feed a two-addr, then
1329 // this is a load-use-store case that we can try to fold.
1330 if (NonSpillCount == 1 && !FeedsTwoAddr) {
1331 // Start by finding the non-store use MachineInstr.
1332 SmallPtrSet<MachineInstr*, 4>::iterator UI = VNUseCount[CurrVN].begin();
1333 int StoreFrameIndex;
1334 unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
1335 while (UI != VNUseCount[CurrVN].end() &&
1336 (StoreVReg == (*LI)->reg && StoreFrameIndex == FrameIndex)) {
1338 if (UI != VNUseCount[CurrVN].end())
1339 StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
1341 if (UI == VNUseCount[CurrVN].end()) continue;
1343 MachineInstr* use = *UI;
1345 // Attempt to fold it away!
1346 int OpIdx = use->findRegisterUseOperandIdx((*LI)->reg, false);
1347 if (OpIdx == -1) continue;
1348 SmallVector<unsigned, 1> Ops;
1349 Ops.push_back(OpIdx);
1350 if (!TII->canFoldMemoryOperand(use, Ops)) continue;
1352 MachineInstr* NewMI =
1353 TII->foldMemoryOperand(*use->getParent()->getParent(),
1354 use, Ops, FrameIndex);
1356 if (!NewMI) continue;
1358 // Update relevant analyses.
1359 LIs->RemoveMachineInstrFromMaps(DefMI);
1360 LIs->ReplaceMachineInstrInMaps(use, NewMI);
1361 (*LI)->removeValNo(CurrVN);
1363 DefMI->eraseFromParent();
1364 MachineBasicBlock* MBB = use->getParent();
1365 NewMI = MBB->insert(MBB->erase(use), NewMI);
1366 VNUseCount[CurrVN].erase(use);
1368 // Remove deleted instructions. Note that we need to remove them from
1369 // the VNInfo->use map as well, just to be safe.
1370 for (SmallPtrSet<MachineInstr*, 4>::iterator II =
1371 VNUseCount[CurrVN].begin(), IE = VNUseCount[CurrVN].end();
1373 for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
1374 VNI = VNUseCount.begin(), VNE = VNUseCount.end(); VNI != VNE;
1376 if (VNI->first != CurrVN)
1377 VNI->second.erase(*II);
1378 LIs->RemoveMachineInstrFromMaps(*II);
1379 (*II)->eraseFromParent();
1382 VNUseCount.erase(CurrVN);
1384 for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
1385 VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
1386 if (VI->second.erase(use))
1387 VI->second.insert(NewMI);
1394 // If there's more than one non-store instruction, we can't profitably
1395 // fold it, so bail.
1396 if (NonSpillCount) continue;
1398 // Otherwise, this is a load-store case, so DCE them.
1399 for (SmallPtrSet<MachineInstr*, 4>::iterator UI =
1400 VNUseCount[CurrVN].begin(), UE = VNUseCount[CurrVN].end();
1402 LIs->RemoveMachineInstrFromMaps(*UI);
1403 (*UI)->eraseFromParent();
1406 VNUseCount.erase(CurrVN);
1408 LIs->RemoveMachineInstrFromMaps(DefMI);
1409 (*LI)->removeValNo(CurrVN);
1410 DefMI->eraseFromParent();
1419 bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
1420 MachineBasicBlock* DefMBB,
1421 MachineBasicBlock* BarrierMBB) {
1422 if (DefMBB == BarrierMBB)
1425 if (LR->valno->hasPHIKill())
1428 SlotIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
1429 if (LR->end < MBBEnd)
1432 MachineLoopInfo& MLI = getAnalysis<MachineLoopInfo>();
1433 if (MLI.getLoopFor(DefMBB) != MLI.getLoopFor(BarrierMBB))
1436 MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
1437 SmallPtrSet<MachineBasicBlock*, 4> Visited;
1438 typedef std::pair<MachineBasicBlock*,
1439 MachineBasicBlock::succ_iterator> ItPair;
1440 SmallVector<ItPair, 4> Stack;
1441 Stack.push_back(std::make_pair(BarrierMBB, BarrierMBB->succ_begin()));
1443 while (!Stack.empty()) {
1444 ItPair P = Stack.back();
1447 MachineBasicBlock* PredMBB = P.first;
1448 MachineBasicBlock::succ_iterator S = P.second;
1450 if (S == PredMBB->succ_end())
1452 else if (Visited.count(*S)) {
1453 Stack.push_back(std::make_pair(PredMBB, ++S));
1456 Stack.push_back(std::make_pair(PredMBB, S+1));
1458 MachineBasicBlock* MBB = *S;
1459 Visited.insert(MBB);
1461 if (MBB == BarrierMBB)
1464 MachineDomTreeNode* DefMDTN = MDT.getNode(DefMBB);
1465 MachineDomTreeNode* BarrierMDTN = MDT.getNode(BarrierMBB);
1466 MachineDomTreeNode* MDTN = MDT.getNode(MBB)->getIDom();
1468 if (MDTN == DefMDTN)
1470 else if (MDTN == BarrierMDTN)
1472 MDTN = MDTN->getIDom();
1475 MBBEnd = LIs->getMBBEndIdx(MBB);
1476 if (LR->end > MBBEnd)
1477 Stack.push_back(std::make_pair(MBB, MBB->succ_begin()));
1484 bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
1486 TM = &MF.getTarget();
1487 TRI = TM->getRegisterInfo();
1488 TII = TM->getInstrInfo();
1489 MFI = MF.getFrameInfo();
1490 MRI = &MF.getRegInfo();
1491 SIs = &getAnalysis<SlotIndexes>();
1492 LIs = &getAnalysis<LiveIntervals>();
1493 LSs = &getAnalysis<LiveStacks>();
1494 VRM = &getAnalysis<VirtRegMap>();
1496 bool MadeChange = false;
1498 // Make sure blocks are numbered in order.
1499 MF.RenumberBlocks();
1501 MachineBasicBlock *Entry = MF.begin();
1502 SmallPtrSet<MachineBasicBlock*,16> Visited;
1504 SmallPtrSet<LiveInterval*, 8> Split;
1506 for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
1507 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
1510 for (MachineBasicBlock::iterator I = BarrierMBB->begin(),
1511 E = BarrierMBB->end(); I != E; ++I) {
1513 const TargetRegisterClass **BarrierRCs =
1514 Barrier->getDesc().getRegClassBarriers();
1517 BarrierIdx = LIs->getInstructionIndex(Barrier);
1518 MadeChange |= SplitRegLiveIntervals(BarrierRCs, Split);
1522 MadeChange |= removeDeadSpills(Split);