//===-- PreAllocSplitting.cpp - Pre-allocation Interval Splitting Pass. --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the machine instruction level pre-register allocation
// live interval splitting pass. It finds live interval barriers, i.e.
// instructions which will kill all physical registers in certain register
// classes, and splits all live intervals which cross the barrier.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "pre-alloc-split"
#include "VirtRegMap.h"
#include "llvm/CodeGen/CalcSpillWeights.h"
#include "llvm/CodeGen/LiveIntervalAnalysis.h"
#include "llvm/CodeGen/LiveStackAnalysis.h"
#include "llvm/CodeGen/MachineDominators.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/RegisterCoalescer.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include "llvm/Target/TargetRegisterInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
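
// Hidden command-line caps on how many splits, dead-spill removals, and
// restore folds are performed (-1, the default, means unlimited); handy for
// bisecting miscompiles down to a single transformation.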
static cl::opt<int> PreSplitLimit("pre-split-limit", cl::init(-1), cl::Hidden);
static cl::opt<int> DeadSplitLimit("dead-split-limit", cl::init(-1),
                                   cl::Hidden);
static cl::opt<int> RestoreFoldLimit("restore-fold-limit", cl::init(-1),
                                     cl::Hidden);
STATISTIC(NumSplits, "Number of intervals split");
STATISTIC(NumRemats, "Number of intervals split by rematerialization");
STATISTIC(NumFolds, "Number of intervals split with spill folding");
STATISTIC(NumRestoreFolds, "Number of intervals split with restore folding");
STATISTIC(NumRenumbers, "Number of intervals renumbered into new registers");
STATISTIC(NumDeadSpills, "Number of dead spills removed");
namespace {
  class PreAllocSplitting : public MachineFunctionPass {
    MachineFunction       *CurrMF;
    const TargetMachine   *TM;
    const TargetInstrInfo *TII;
    const TargetRegisterInfo* TRI;
    MachineFrameInfo      *MFI;
    MachineRegisterInfo   *MRI;
    SlotIndexes           *SIs;
    LiveIntervals         *LIs;
    LiveStacks            *LSs;
    VirtRegMap            *VRM;

    // Barrier - Current barrier being processed.
    MachineInstr          *Barrier;

    // BarrierMBB - Basic block where the barrier resides in.
    MachineBasicBlock     *BarrierMBB;

    // BarrierIdx - Current barrier index.
    SlotIndex              BarrierIdx;

    // CurrLI - Current live interval being split.
    LiveInterval          *CurrLI;

    // CurrSLI - Current stack slot live interval.
    LiveInterval          *CurrSLI;

    // CurrSValNo - Current val# for the stack slot live interval.
    VNInfo                *CurrSValNo;

    // IntervalSSMap - A map from live interval to spill slots.
    DenseMap<unsigned, int> IntervalSSMap;

    // Def2SpillMap - A map from a def instruction index to spill index.
    DenseMap<SlotIndex, SlotIndex> Def2SpillMap;

  public:
    static char ID;
    PreAllocSplitting()
      : MachineFunctionPass(&ID) {}
    virtual bool runOnMachineFunction(MachineFunction &MF);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<SlotIndexes>();
      AU.addPreserved<SlotIndexes>();
      AU.addRequired<LiveIntervals>();
      AU.addPreserved<LiveIntervals>();
      AU.addRequired<LiveStacks>();
      AU.addPreserved<LiveStacks>();
      AU.addPreserved<RegisterCoalescer>();
      AU.addPreserved<CalculateSpillWeights>();
      if (StrongPHIElim)
        AU.addPreservedID(StrongPHIEliminationID);
      else
        AU.addPreservedID(PHIEliminationID);
      AU.addRequired<MachineDominatorTree>();
      AU.addRequired<MachineLoopInfo>();
      AU.addRequired<VirtRegMap>();
      AU.addPreserved<MachineDominatorTree>();
      AU.addPreserved<MachineLoopInfo>();
      AU.addPreserved<VirtRegMap>();
      MachineFunctionPass::getAnalysisUsage(AU);
    }
    virtual void releaseMemory() {
      IntervalSSMap.clear();
      Def2SpillMap.clear();
    }

    virtual const char *getPassName() const {
      return "Pre-Register Allocation Live Interval Splitting";
    }

    /// print - Implement the dump method.
    virtual void print(raw_ostream &O, const Module* M = 0) const {
      LIs->print(O, M);
    }

  private:
    MachineBasicBlock::iterator
      findSpillPoint(MachineBasicBlock*, MachineInstr*, MachineInstr*,
                     SmallPtrSet<MachineInstr*, 4>&);

    MachineBasicBlock::iterator
      findRestorePoint(MachineBasicBlock*, MachineInstr*, SlotIndex,
                       SmallPtrSet<MachineInstr*, 4>&);

    int CreateSpillStackSlot(unsigned, const TargetRegisterClass *);

    bool IsAvailableInStack(MachineBasicBlock*, unsigned,
                            SlotIndex, SlotIndex,
                            SlotIndex&, int&) const;

    void UpdateSpillSlotInterval(VNInfo*, SlotIndex, SlotIndex);

    bool SplitRegLiveInterval(LiveInterval*);

    bool SplitRegLiveIntervals(const TargetRegisterClass **,
                               SmallPtrSet<LiveInterval*, 8>&);

    bool createsNewJoin(LiveRange* LR, MachineBasicBlock* DefMBB,
                        MachineBasicBlock* BarrierMBB);
    bool Rematerialize(unsigned vreg, VNInfo* ValNo,
                       MachineInstr* DefMI,
                       MachineBasicBlock::iterator RestorePt,
                       SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    MachineInstr* FoldSpill(unsigned vreg, const TargetRegisterClass* RC,
                            MachineInstr* DefMI,
                            MachineInstr* Barrier,
                            MachineBasicBlock* MBB,
                            int& SS,
                            SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    MachineInstr* FoldRestore(unsigned vreg,
                              const TargetRegisterClass* RC,
                              MachineInstr* Barrier,
                              MachineBasicBlock* MBB,
                              int SS,
                              SmallPtrSet<MachineInstr*, 4>& RefsInMBB);
    void RenumberValno(VNInfo* VN);
    void ReconstructLiveInterval(LiveInterval* LI);
    bool removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split);
    unsigned getNumberOfNonSpills(SmallPtrSet<MachineInstr*, 4>& MIs,
                                  unsigned Reg, int FrameIndex, bool& TwoAddr);
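
    // PerformPHIConstruction and its fallback implement a simple SSA-style
    // reaching-definition search: starting from a use, they walk backwards
    // through cached per-block def/use sets and across predecessor blocks,
    // creating value numbers and live ranges as the recursion unwinds.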
    VNInfo* PerformPHIConstruction(MachineBasicBlock::iterator Use,
                                   MachineBasicBlock* MBB, LiveInterval* LI,
                                   SmallPtrSet<MachineInstr*, 4>& Visited,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                   DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                   bool IsTopLevel, bool IsIntraBlock);
    VNInfo* PerformPHIConstructionFallBack(MachineBasicBlock::iterator Use,
                                   MachineBasicBlock* MBB, LiveInterval* LI,
                                   SmallPtrSet<MachineInstr*, 4>& Visited,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                   DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                   DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                   bool IsTopLevel, bool IsIntraBlock);
  };
} // end anonymous namespace

char PreAllocSplitting::ID = 0;

static RegisterPass<PreAllocSplitting>
X("pre-alloc-splitting", "Pre-Register Allocation Live Interval Splitting");

const PassInfo *const llvm::PreAllocSplittingID = &X;
/// findSpillPoint - Find a gap as far away from the given MI that's suitable
/// for spilling the current live interval. The index must be before any
/// defs and uses of the live interval register in the mbb. Return begin() if
/// none is found.
MachineBasicBlock::iterator
PreAllocSplitting::findSpillPoint(MachineBasicBlock *MBB, MachineInstr *MI,
                                  MachineInstr *DefMI,
                                  SmallPtrSet<MachineInstr*, 4> &RefsInMBB) {
  MachineBasicBlock::iterator Pt = MBB->begin();

  MachineBasicBlock::iterator MII = MI;
  MachineBasicBlock::iterator EndPt = DefMI
    ? MachineBasicBlock::iterator(DefMI) : MBB->begin();
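
  // The barrier is a call; first walk backwards from it to its matching call
  // frame setup, giving up if a reference to the register is seen on the way.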
  while (MII != EndPt && !RefsInMBB.count(MII) &&
         MII->getOpcode() != TRI->getCallFrameSetupOpcode())
    --MII;
  if (MII == EndPt || RefsInMBB.count(MII)) return Pt;

  while (MII != EndPt && !RefsInMBB.count(MII)) {
    // We can't insert the spill between the barrier (a call), and its
    // corresponding call frame setup.
    if (MII->getOpcode() == TRI->getCallFrameDestroyOpcode()) {
      while (MII->getOpcode() != TRI->getCallFrameSetupOpcode()) {
        --MII;
        if (MII == EndPt) return Pt;
      }
      continue;
    } else
      Pt = MII;
    if (RefsInMBB.count(MII))
      return Pt;
    --MII;
  }
  return Pt;
}
/// findRestorePoint - Find a gap in the instruction index map that's suitable
/// for restoring the current live interval value. The index must be before any
/// uses of the live interval register in the mbb. Return end() if none is
/// found.
MachineBasicBlock::iterator
PreAllocSplitting::findRestorePoint(MachineBasicBlock *MBB, MachineInstr *MI,
                                    SlotIndex LastIdx,
                                    SmallPtrSet<MachineInstr*, 4> &RefsInMBB) {
  // FIXME: Allow spill to be inserted to the beginning of the mbb. Update mbb
  // begin index accordingly.
  MachineBasicBlock::iterator Pt = MBB->end();
  MachineBasicBlock::iterator EndPt = MBB->getFirstTerminator();

  // We start at the call, so walk forward until we find the call frame
  // teardown since we can't insert restores before that. Bail if we encounter
  // a use within this block.
  MachineBasicBlock::iterator MII = MI;
  if (MII == EndPt) return Pt;

  while (MII != EndPt && !RefsInMBB.count(MII) &&
         MII->getOpcode() != TRI->getCallFrameDestroyOpcode())
    ++MII;
  if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
  ++MII;

  // FIXME: Limit the number of instructions to examine to reduce
  // compile time.
  while (MII != EndPt) {
    SlotIndex Index = LIs->getInstructionIndex(MII);
    if (Index > LastIdx)
      break;

    // We can't insert a restore between the barrier (a call) and its
    // corresponding call frame teardown.
    if (MII->getOpcode() == TRI->getCallFrameSetupOpcode()) {
      do {
        if (MII == EndPt || RefsInMBB.count(MII)) return Pt;
        ++MII;
      } while (MII->getOpcode() != TRI->getCallFrameDestroyOpcode());
    } else
      Pt = MII;
    if (RefsInMBB.count(MII))
      return Pt;
    ++MII;
  }
  return Pt;
}
/// CreateSpillStackSlot - Create a stack slot for the live interval being
/// split. If the live interval was previously split, just reuse the same
/// slot.
int PreAllocSplitting::CreateSpillStackSlot(unsigned Reg,
                                            const TargetRegisterClass *RC) {
  int SS;
  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(Reg);
  if (I != IntervalSSMap.end()) {
    SS = I->second;
  } else {
    SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
    IntervalSSMap[Reg] = SS;
  }

  // Create live interval for stack slot.
  CurrSLI = &LSs->getOrCreateInterval(SS, RC);
  if (CurrSLI->hasAtLeastOneValue())
    CurrSValNo = CurrSLI->getValNumInfo(0);
  else
    CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
                                       LSs->getVNInfoAllocator());
  return SS;
}
/// IsAvailableInStack - Return true if register is available in a split stack
/// slot at the specified index.
bool
PreAllocSplitting::IsAvailableInStack(MachineBasicBlock *DefMBB,
                                      unsigned Reg, SlotIndex DefIndex,
                                      SlotIndex RestoreIndex,
                                      SlotIndex &SpillIndex,
                                      int& SS) const {
  if (!DefMBB)
    return false;

  DenseMap<unsigned, int>::const_iterator I = IntervalSSMap.find(Reg);
  if (I == IntervalSSMap.end())
    return false;
  DenseMap<SlotIndex, SlotIndex>::const_iterator
    II = Def2SpillMap.find(DefIndex);
  if (II == Def2SpillMap.end())
    return false;

  // If last spill of def is in the same mbb as barrier mbb (where restore will
  // be), make sure it's not below the intended restore index.
  // FIXME: Undo the previous spill?
  assert(LIs->getMBBFromIndex(II->second) == DefMBB);
  if (DefMBB == BarrierMBB && II->second >= RestoreIndex)
    return false;

  SS = I->second;
  SpillIndex = II->second;
  return true;
}
/// UpdateSpillSlotInterval - Given the specified val# of the register live
/// interval being split, and the spill and restore indices, update the live
/// interval of the spill stack slot.
void
PreAllocSplitting::UpdateSpillSlotInterval(VNInfo *ValNo, SlotIndex SpillIndex,
                                           SlotIndex RestoreIndex) {
  assert(LIs->getMBBFromIndex(RestoreIndex) == BarrierMBB &&
         "Expect restore in the barrier mbb");

  MachineBasicBlock *MBB = LIs->getMBBFromIndex(SpillIndex);
  if (MBB == BarrierMBB) {
    // Intra-block spill + restore. We are done.
    LiveRange SLR(SpillIndex, RestoreIndex, CurrSValNo);
    CurrSLI->addRange(SLR);
    return;
  }

  SmallPtrSet<MachineBasicBlock*, 4> Processed;
  SlotIndex EndIdx = LIs->getMBBEndIdx(MBB);
  LiveRange SLR(SpillIndex, EndIdx, CurrSValNo);
  CurrSLI->addRange(SLR);
  Processed.insert(MBB);

  // Start from the spill mbb, figure out the extent of the spill slot's
  // live interval.
  SmallVector<MachineBasicBlock*, 4> WorkList;
  const LiveRange *LR = CurrLI->getLiveRangeContaining(SpillIndex);
  if (LR->end > EndIdx)
    // If live range extends beyond end of mbb, add successors to work list.
    for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
           SE = MBB->succ_end(); SI != SE; ++SI)
      WorkList.push_back(*SI);
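
  // Flood-fill the CFG from the spill block: the stack slot must remain live
  // through every block the original value reaches before hitting the restore.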
  while (!WorkList.empty()) {
    MachineBasicBlock *MBB = WorkList.back();
    WorkList.pop_back();
    if (Processed.count(MBB))
      continue;
    SlotIndex Idx = LIs->getMBBStartIdx(MBB);
    LR = CurrLI->getLiveRangeContaining(Idx);
    if (LR && LR->valno == ValNo) {
      EndIdx = LIs->getMBBEndIdx(MBB);
      if (Idx <= RestoreIndex && RestoreIndex < EndIdx) {
        // Spill slot live interval stops at the restore.
        LiveRange SLR(Idx, RestoreIndex, CurrSValNo);
        CurrSLI->addRange(SLR);
      } else if (LR->end > EndIdx) {
        // Live range extends beyond end of mbb, process successors.
        LiveRange SLR(Idx, EndIdx.getNextIndex(), CurrSValNo);
        CurrSLI->addRange(SLR);
        for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
               SE = MBB->succ_end(); SI != SE; ++SI)
          WorkList.push_back(*SI);
      } else {
        LiveRange SLR(Idx, LR->end, CurrSValNo);
        CurrSLI->addRange(SLR);
      }
      Processed.insert(MBB);
    }
  }
}
/// PerformPHIConstruction - From properly set up use and def lists, use a PHI
/// construction algorithm to compute the ranges and valnos for an interval.
VNInfo*
PreAllocSplitting::PerformPHIConstruction(MachineBasicBlock::iterator UseI,
                                          MachineBasicBlock* MBB,
                                          LiveInterval* LI,
                                          SmallPtrSet<MachineInstr*, 4>& Visited,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                          DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                          DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                          DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                          bool IsTopLevel, bool IsIntraBlock) {
  // Return memoized result if it's available.
  if (IsTopLevel && Visited.count(UseI) && NewVNs.count(UseI))
    return NewVNs[UseI];
  else if (!IsTopLevel && IsIntraBlock && NewVNs.count(UseI))
    return NewVNs[UseI];
  else if (!IsIntraBlock && LiveOut.count(MBB))
    return LiveOut[MBB];

  // Check if our block contains any uses or defs.
  bool ContainsDefs = Defs.count(MBB);
  bool ContainsUses = Uses.count(MBB);

  VNInfo* RetVNI = 0;

  // Enumerate the cases of use/def containing blocks.
  if (!ContainsDefs && !ContainsUses) {
    return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
                                          NewVNs, LiveOut, Phis,
                                          IsTopLevel, IsIntraBlock);
  } else if (ContainsDefs && !ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];

    // Search for the def in this block. If we don't find it before the
    // instruction we care about, go to the fallback case. Note that that
    // should never happen: this cannot be intrablock, so use should
    // always be an end() iterator.
    assert(UseI == MBB->end() && "No use marked in intrablock");

    MachineBasicBlock::iterator Walker = UseI;
    --Walker;
    while (Walker != MBB->begin()) {
      if (BlockDefs.count(Walker))
        break;
      --Walker;
    }

    // Once we've found it, extend its VNInfo to our instruction.
    SlotIndex DefIndex = LIs->getInstructionIndex(Walker);
    DefIndex = DefIndex.getDefIndex();
    SlotIndex EndIndex = LIs->getMBBEndIdx(MBB);

    RetVNI = NewVNs[Walker];
    LI->addRange(LiveRange(DefIndex, EndIndex, RetVNI));
  } else if (!ContainsDefs && ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

    // Search for the use in this block that precedes the instruction we care
    // about, going to the fallback case if we don't find it.
    if (UseI == MBB->begin())
      return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
                                            Uses, NewVNs, LiveOut, Phis,
                                            IsTopLevel, IsIntraBlock);

    MachineBasicBlock::iterator Walker = UseI;
    --Walker;
    bool found = false;
    while (Walker != MBB->begin()) {
      if (BlockUses.count(Walker)) {
        found = true;
        break;
      }
      --Walker;
    }

    // Must check begin() too.
    if (!found) {
      if (BlockUses.count(Walker))
        found = true;
      else
        return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
                                              Uses, NewVNs, LiveOut, Phis,
                                              IsTopLevel, IsIntraBlock);
    }

    SlotIndex UseIndex = LIs->getInstructionIndex(Walker);
    UseIndex = UseIndex.getUseIndex();
    SlotIndex EndIndex;
    if (IsIntraBlock)
      EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
    else
      EndIndex = LIs->getMBBEndIdx(MBB);

    // Now, recursively phi construct the VNInfo for the use we found,
    // and then extend it to include the instruction we care about.
    RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
                                    NewVNs, LiveOut, Phis, false, true);

    LI->addRange(LiveRange(UseIndex, EndIndex, RetVNI));

    // FIXME: Need to set kills properly for inter-block stuff.
    if (RetVNI->isKill(UseIndex)) RetVNI->removeKill(UseIndex);
    if (IsIntraBlock)
      RetVNI->addKill(EndIndex);
  } else if (ContainsDefs && ContainsUses) {
    SmallPtrSet<MachineInstr*, 2>& BlockDefs = Defs[MBB];
    SmallPtrSet<MachineInstr*, 2>& BlockUses = Uses[MBB];

    // This case is basically a merging of the two preceding cases, with the
    // special note that checking for defs must take precedence over checking
    // for uses, because of two-address instructions.

    if (UseI == MBB->begin())
      return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs, Uses,
                                            NewVNs, LiveOut, Phis,
                                            IsTopLevel, IsIntraBlock);

    MachineBasicBlock::iterator Walker = UseI;
    --Walker;
    bool foundDef = false;
    bool foundUse = false;
    while (Walker != MBB->begin()) {
      if (BlockDefs.count(Walker)) {
        foundDef = true;
        break;
      } else if (BlockUses.count(Walker)) {
        foundUse = true;
        break;
      }
      --Walker;
    }

    // Must check begin() too.
    if (!foundDef && !foundUse) {
      if (BlockDefs.count(Walker))
        foundDef = true;
      else if (BlockUses.count(Walker))
        foundUse = true;
      else
        return PerformPHIConstructionFallBack(UseI, MBB, LI, Visited, Defs,
                                              Uses, NewVNs, LiveOut, Phis,
                                              IsTopLevel, IsIntraBlock);
    }

    SlotIndex StartIndex = LIs->getInstructionIndex(Walker);
    StartIndex = foundDef ? StartIndex.getDefIndex() : StartIndex.getUseIndex();
    SlotIndex EndIndex;
    if (IsIntraBlock)
      EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
    else
      EndIndex = LIs->getMBBEndIdx(MBB);

    if (foundDef)
      RetVNI = NewVNs[Walker];
    else
      RetVNI = PerformPHIConstruction(Walker, MBB, LI, Visited, Defs, Uses,
                                      NewVNs, LiveOut, Phis, false, true);

    LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));

    if (foundUse && RetVNI->isKill(StartIndex))
      RetVNI->removeKill(StartIndex);
    if (IsIntraBlock)
      RetVNI->addKill(EndIndex);
  }

  // Memoize results so we don't have to recompute them.
  if (!IsIntraBlock) LiveOut[MBB] = RetVNI;
  else {
    if (!NewVNs.count(UseI))
      NewVNs[UseI] = RetVNI;
    Visited.insert(UseI);
  }

  return RetVNI;
}
/// PerformPHIConstructionFallBack - PerformPHIConstruction fall back path.
VNInfo*
PreAllocSplitting::PerformPHIConstructionFallBack(
                                          MachineBasicBlock::iterator UseI,
                                          MachineBasicBlock* MBB,
                                          LiveInterval* LI,
                                          SmallPtrSet<MachineInstr*, 4>& Visited,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Defs,
            DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> >& Uses,
                                          DenseMap<MachineInstr*, VNInfo*>& NewVNs,
                                          DenseMap<MachineBasicBlock*, VNInfo*>& LiveOut,
                                          DenseMap<MachineBasicBlock*, VNInfo*>& Phis,
                                          bool IsTopLevel, bool IsIntraBlock) {
  // NOTE: Because this is the fallback case from other cases, we do NOT
  // assume that we are not intrablock here.
  if (Phis.count(MBB)) return Phis[MBB];

  SlotIndex StartIndex = LIs->getMBBStartIdx(MBB);
  VNInfo *RetVNI = Phis[MBB] =
    LI->getNextValue(SlotIndex(), /*FIXME*/ 0, false,
                     LIs->getVNInfoAllocator());
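
  // This value number is a placeholder phi-join at the block entry; if the
  // block turns out to have a single predecessor it is merged away below.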
  if (!IsIntraBlock) LiveOut[MBB] = RetVNI;

  // If there are no uses or defs between our starting point and the
  // beginning of the block, then recursively perform phi construction
  // on our predecessors.
  DenseMap<MachineBasicBlock*, VNInfo*> IncomingVNs;
  for (MachineBasicBlock::pred_iterator PI = MBB->pred_begin(),
         PE = MBB->pred_end(); PI != PE; ++PI) {
    VNInfo* Incoming = PerformPHIConstruction((*PI)->end(), *PI, LI,
                                              Visited, Defs, Uses, NewVNs,
                                              LiveOut, Phis, false, false);
    if (Incoming != 0)
      IncomingVNs[*PI] = Incoming;
  }
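
  // A single-predecessor block needs no real phi: merge the placeholder into
  // the lone incoming value and rewrite every cached reference to it.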
  if (MBB->pred_size() == 1 && !RetVNI->hasPHIKill()) {
    VNInfo* OldVN = RetVNI;
    VNInfo* NewVN = IncomingVNs.begin()->second;
    VNInfo* MergedVN = LI->MergeValueNumberInto(OldVN, NewVN);
    if (MergedVN == OldVN) std::swap(OldVN, NewVN);

    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator LOI = LiveOut.begin(),
         LOE = LiveOut.end(); LOI != LOE; ++LOI)
      if (LOI->second == OldVN)
        LOI->second = MergedVN;
    for (DenseMap<MachineInstr*, VNInfo*>::iterator NVI = NewVNs.begin(),
         NVE = NewVNs.end(); NVI != NVE; ++NVI)
      if (NVI->second == OldVN)
        NVI->second = MergedVN;
    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator PI = Phis.begin(),
         PE = Phis.end(); PI != PE; ++PI)
      if (PI->second == OldVN)
        PI->second = MergedVN;

    RetVNI = MergedVN;
  } else {
    // Otherwise, merge the incoming VNInfos with a phi join. Create a new
    // VNInfo to represent the joined value.
    for (DenseMap<MachineBasicBlock*, VNInfo*>::iterator I =
           IncomingVNs.begin(), E = IncomingVNs.end(); I != E; ++I) {
      I->second->setHasPHIKill(true);
      SlotIndex KillIndex(LIs->getMBBEndIdx(I->first), true);
      if (!I->second->isKill(KillIndex))
        I->second->addKill(KillIndex);
    }
  }

  SlotIndex EndIndex;
  if (IsIntraBlock)
    EndIndex = LIs->getInstructionIndex(UseI).getDefIndex();
  else
    EndIndex = LIs->getMBBEndIdx(MBB);
  LI->addRange(LiveRange(StartIndex, EndIndex, RetVNI));
  if (IsIntraBlock)
    RetVNI->addKill(EndIndex);

  // Memoize results so we don't have to recompute them.
  if (!IsIntraBlock)
    LiveOut[MBB] = RetVNI;
  else {
    if (!NewVNs.count(UseI))
      NewVNs[UseI] = RetVNI;
    Visited.insert(UseI);
  }

  return RetVNI;
}
/// ReconstructLiveInterval - Recompute a live interval from scratch.
void PreAllocSplitting::ReconstructLiveInterval(LiveInterval* LI) {
  BumpPtrAllocator& Alloc = LIs->getVNInfoAllocator();

  // Clear the old ranges and valnos.
  LI->clear();

  // Cache the uses and defs of the register.
  typedef DenseMap<MachineBasicBlock*, SmallPtrSet<MachineInstr*, 2> > RegMap;
  RegMap Defs;
  RegMap Uses;

  // Keep track of the new VNs we're creating.
  DenseMap<MachineInstr*, VNInfo*> NewVNs;
  SmallPtrSet<VNInfo*, 2> PhiVNs;

  // Cache defs, and create a new VNInfo for each def.
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
       DE = MRI->def_end(); DI != DE; ++DI) {
    Defs[(*DI).getParent()].insert(&*DI);

    SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
    DefIdx = DefIdx.getDefIndex();

    assert(DI->getOpcode() != TargetInstrInfo::PHI &&
           "PHI instr in code during pre-alloc splitting.");
    VNInfo* NewVN = LI->getNextValue(DefIdx, 0, true, Alloc);

    // If the def is a move, set the copy field.
    unsigned SrcReg, DstReg, SrcSubIdx, DstSubIdx;
    if (TII->isMoveInstr(*DI, SrcReg, DstReg, SrcSubIdx, DstSubIdx))
      if (DstReg == LI->reg)
        NewVN->setCopy(&*DI);

    NewVNs[&*DI] = NewVN;
  }

  // Cache uses as a separate pass from actually processing them.
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
       UE = MRI->use_end(); UI != UE; ++UI)
    Uses[(*UI).getParent()].insert(&*UI);

  // Now, actually process every use and use a phi construction algorithm
  // to walk from it to its reaching definitions, building VNInfos along
  // the way.
  DenseMap<MachineBasicBlock*, VNInfo*> LiveOut;
  DenseMap<MachineBasicBlock*, VNInfo*> Phis;
  SmallPtrSet<MachineInstr*, 4> Visited;
  for (MachineRegisterInfo::use_iterator UI = MRI->use_begin(LI->reg),
       UE = MRI->use_end(); UI != UE; ++UI) {
    PerformPHIConstruction(&*UI, UI->getParent(), LI, Visited, Defs,
                           Uses, NewVNs, LiveOut, Phis, true, true);
  }

  // Add ranges for dead defs.
  for (MachineRegisterInfo::def_iterator DI = MRI->def_begin(LI->reg),
       DE = MRI->def_end(); DI != DE; ++DI) {
    SlotIndex DefIdx = LIs->getInstructionIndex(&*DI);
    DefIdx = DefIdx.getDefIndex();

    if (LI->liveAt(DefIdx)) continue;

    VNInfo* DeadVN = NewVNs[&*DI];
    LI->addRange(LiveRange(DefIdx, DefIdx.getNextSlot(), DeadVN));
    DeadVN->addKill(DefIdx);
  }

  // Update kill markers.
  for (LiveInterval::vni_iterator VI = LI->vni_begin(), VE = LI->vni_end();
       VI != VE; ++VI) {
    VNInfo* VNI = *VI;
    for (unsigned i = 0, e = VNI->kills.size(); i != e; ++i) {
      SlotIndex KillIdx = VNI->kills[i];
      if (KillIdx.isPHI())
        continue;
      MachineInstr *KillMI = LIs->getInstructionFromIndex(KillIdx);
      if (KillMI) {
        MachineOperand *KillMO = KillMI->findRegisterUseOperand(CurrLI->reg);
        if (KillMO)
          // It could be a dead def.
          KillMO->setIsKill();
      }
    }
  }
}
/// RenumberValno - Split the given valno out into a new vreg, allowing it to
/// be allocated to a different register. This function creates a new vreg,
/// copies the valno and its live ranges over to the new vreg's interval,
/// removes them from the old interval, and rewrites all uses and defs of
/// the original reg to the new vreg within those ranges.
void PreAllocSplitting::RenumberValno(VNInfo* VN) {
  SmallVector<VNInfo*, 4> Stack;
  SmallVector<VNInfo*, 4> VNsToCopy;
  Stack.push_back(VN);

  // Walk through and copy the valno we care about, and any other valnos
  // that are two-address redefinitions of the one we care about. These
  // will need to be rewritten as well. We also check for safety of the
  // renumbering here, by making sure that none of the valnos involved has
  // phi kills.
  while (!Stack.empty()) {
    VNInfo* OldVN = Stack.back();
    Stack.pop_back();

    // Bail out if we ever encounter a valno that has a PHI kill. We can't
    // renumber these.
    if (OldVN->hasPHIKill()) return;

    VNsToCopy.push_back(OldVN);

    // Locate two-address redefinitions
    for (VNInfo::KillSet::iterator KI = OldVN->kills.begin(),
         KE = OldVN->kills.end(); KI != KE; ++KI) {
      assert(!KI->isPHI() &&
             "VN previously reported having no PHI kills.");
      MachineInstr* MI = LIs->getInstructionFromIndex(*KI);
      unsigned DefIdx = MI->findRegisterDefOperandIdx(CurrLI->reg);
      if (DefIdx == ~0U) continue;
      if (MI->isRegTiedToUseOperand(DefIdx)) {
        VNInfo* NextVN =
          CurrLI->findDefinedVNInfoForRegInt(KI->getDefIndex());
        if (NextVN == OldVN) continue;
        Stack.push_back(NextVN);
      }
    }
  }

  // Create the new vreg
  unsigned NewVReg = MRI->createVirtualRegister(MRI->getRegClass(CurrLI->reg));

  // Create the new live interval
  LiveInterval& NewLI = LIs->getOrCreateInterval(NewVReg);

  for (SmallVector<VNInfo*, 4>::iterator OI = VNsToCopy.begin(), OE =
       VNsToCopy.end(); OI != OE; ++OI) {
    VNInfo* OldVN = *OI;

    // Copy the valno over
    VNInfo* NewVN = NewLI.createValueCopy(OldVN, LIs->getVNInfoAllocator());
    NewLI.MergeValueInAsValue(*CurrLI, OldVN, NewVN);

    // Remove the valno from the old interval
    CurrLI->removeValNo(OldVN);
  }

  // Rewrite defs and uses. This is done in two stages to avoid invalidating
  // the reg_iterator.
  SmallVector<std::pair<MachineInstr*, unsigned>, 8> OpsToChange;

  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
       E = MRI->reg_end(); I != E; ++I) {
    MachineOperand& MO = I.getOperand();
    SlotIndex InstrIdx = LIs->getInstructionIndex(&*I);

    if ((MO.isUse() && NewLI.liveAt(InstrIdx.getUseIndex())) ||
        (MO.isDef() && NewLI.liveAt(InstrIdx.getDefIndex())))
      OpsToChange.push_back(std::make_pair(&*I, I.getOperandNo()));
  }

  for (SmallVector<std::pair<MachineInstr*, unsigned>, 8>::iterator I =
       OpsToChange.begin(), E = OpsToChange.end(); I != E; ++I) {
    MachineInstr* Inst = I->first;
    unsigned OpIdx = I->second;
    MachineOperand& MO = Inst->getOperand(OpIdx);
    MO.setReg(NewVReg);
  }

  // Grow the VirtRegMap, since we've created a new vreg.
  VRM->grow();

  // The renumbered vreg shares a stack slot with the old register.
  if (IntervalSSMap.count(CurrLI->reg))
    IntervalSSMap[NewVReg] = IntervalSSMap[CurrLI->reg];

  ++NumRenumbers;
}
bool PreAllocSplitting::Rematerialize(unsigned VReg, VNInfo* ValNo,
                                      MachineInstr* DefMI,
                                      MachineBasicBlock::iterator RestorePt,
                                      SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  MachineBasicBlock& MBB = *RestorePt->getParent();

  MachineBasicBlock::iterator KillPt = BarrierMBB->end();
  if (!ValNo->isDefAccurate() || DefMI->getParent() == BarrierMBB)
    KillPt = findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
  else
    KillPt = llvm::next(MachineBasicBlock::iterator(DefMI));

  if (KillPt == DefMI->getParent()->end())
    return false;

  TII->reMaterialize(MBB, RestorePt, VReg, 0, DefMI, TRI);
  SlotIndex RematIdx = LIs->InsertMachineInstrInMaps(prior(RestorePt));
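
  // Rebuild the interval so the rematerialized def gets its own value number,
  // then split that value out into a fresh vreg so the register allocator can
  // assign it independently of the original interval.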
  ReconstructLiveInterval(CurrLI);
  RematIdx = RematIdx.getDefIndex();
  RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RematIdx));

  ++NumSplits;
  ++NumRemats;
  return true;
}
MachineInstr* PreAllocSplitting::FoldSpill(unsigned vreg,
                                           const TargetRegisterClass* RC,
                                           MachineInstr* DefMI,
                                           MachineInstr* Barrier,
                                           MachineBasicBlock* MBB,
                                           int& SS,
                                    SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  // Go top down if RefsInMBB is empty.
  if (RefsInMBB.empty())
    return 0;

  MachineBasicBlock::iterator FoldPt = Barrier;
  while (&*FoldPt != DefMI && FoldPt != MBB->begin() &&
         !RefsInMBB.count(FoldPt))
    --FoldPt;

  int OpIdx = FoldPt->findRegisterDefOperandIdx(vreg, false);
  if (OpIdx == -1)
    return 0;

  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpIdx);

  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
    return 0;

  DenseMap<unsigned, int>::iterator I = IntervalSSMap.find(vreg);
  if (I != IntervalSSMap.end()) {
    SS = I->second;
  } else {
    SS = MFI->CreateSpillStackObject(RC->getSize(), RC->getAlignment());
  }

  MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
                                             FoldPt, Ops, SS);

  if (FMI) {
    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
    FMI = MBB->insert(MBB->erase(FoldPt), FMI);
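    // foldMemoryOperand returns a fresh instruction that is not yet in the
    // block, so the insert/erase above splices it in place of the old one.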
    ++NumFolds;

    IntervalSSMap[vreg] = SS;
    CurrSLI = &LSs->getOrCreateInterval(SS, RC);
    if (CurrSLI->hasAtLeastOneValue())
      CurrSValNo = CurrSLI->getValNumInfo(0);
    else
      CurrSValNo = CurrSLI->getNextValue(SlotIndex(), 0, false,
                                         LSs->getVNInfoAllocator());
  }
  return FMI;
}
MachineInstr* PreAllocSplitting::FoldRestore(unsigned vreg,
                                             const TargetRegisterClass* RC,
                                             MachineInstr* Barrier,
                                             MachineBasicBlock* MBB,
                                             int SS,
                                     SmallPtrSet<MachineInstr*, 4>& RefsInMBB) {
  if ((int)RestoreFoldLimit != -1 && RestoreFoldLimit == (int)NumRestoreFolds)
    return 0;

  // Go top down if RefsInMBB is empty.
  if (RefsInMBB.empty())
    return 0;

  // Can't fold a restore between a call stack setup and teardown.
  MachineBasicBlock::iterator FoldPt = Barrier;

  // Advance from barrier to call frame teardown.
  while (FoldPt != MBB->getFirstTerminator() &&
         FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
    if (RefsInMBB.count(FoldPt))
      return 0;
    ++FoldPt;
  }

  if (FoldPt == MBB->getFirstTerminator())
    return 0;
  else
    ++FoldPt;

  // Now find the restore point.
  while (FoldPt != MBB->getFirstTerminator() && !RefsInMBB.count(FoldPt)) {
    if (FoldPt->getOpcode() == TRI->getCallFrameSetupOpcode()) {
      while (FoldPt != MBB->getFirstTerminator() &&
             FoldPt->getOpcode() != TRI->getCallFrameDestroyOpcode()) {
        if (RefsInMBB.count(FoldPt))
          return 0;
        ++FoldPt;
      }
      if (FoldPt == MBB->getFirstTerminator())
        return 0;
    }
    ++FoldPt;
  }

  if (FoldPt == MBB->getFirstTerminator())
    return 0;

  int OpIdx = FoldPt->findRegisterUseOperandIdx(vreg, true);
  if (OpIdx == -1)
    return 0;

  SmallVector<unsigned, 1> Ops;
  Ops.push_back(OpIdx);

  if (!TII->canFoldMemoryOperand(FoldPt, Ops))
    return 0;

  MachineInstr* FMI = TII->foldMemoryOperand(*MBB->getParent(),
                                             FoldPt, Ops, SS);

  if (FMI) {
    LIs->ReplaceMachineInstrInMaps(FoldPt, FMI);
    FMI = MBB->insert(MBB->erase(FoldPt), FMI);
    ++NumRestoreFolds;
  }
  return FMI;
}
/// SplitRegLiveInterval - Split (spill and restore) the given live interval
/// so it would not cross the barrier that's being processed. Shrink wrap
/// (minimize) the live interval to the last uses.
bool PreAllocSplitting::SplitRegLiveInterval(LiveInterval *LI) {
  DEBUG(errs() << "Pre-alloc splitting " << LI->reg << " for " << *Barrier
               << "  result: ");

  CurrLI = LI;

  // Find live range where current interval crosses the barrier.
  LiveInterval::iterator LR =
    CurrLI->FindLiveRangeContaining(BarrierIdx.getUseIndex());
  VNInfo *ValNo = LR->valno;

  assert(!ValNo->isUnused() && "Val# is defined by a dead def?");

  MachineInstr *DefMI = ValNo->isDefAccurate()
    ? LIs->getInstructionFromIndex(ValNo->def) : NULL;

  // If this would create a new join point, do not split.
  if (DefMI && createsNewJoin(LR, DefMI->getParent(), Barrier->getParent())) {
    DEBUG(errs() << "FAILED (would create a new join point).\n");
    return false;
  }

  // Find all references in the barrier mbb.
  SmallPtrSet<MachineInstr*, 4> RefsInMBB;
  for (MachineRegisterInfo::reg_iterator I = MRI->reg_begin(CurrLI->reg),
         E = MRI->reg_end(); I != E; ++I) {
    MachineInstr *RefMI = &*I;
    if (RefMI->getParent() == BarrierMBB)
      RefsInMBB.insert(RefMI);
  }

  // Find a point to restore the value after the barrier.
  MachineBasicBlock::iterator RestorePt =
    findRestorePoint(BarrierMBB, Barrier, LR->end, RefsInMBB);
  if (RestorePt == BarrierMBB->end()) {
    DEBUG(errs() << "FAILED (could not find a suitable restore point).\n");
    return false;
  }

  if (DefMI && LIs->isReMaterializable(*LI, ValNo, DefMI))
    if (Rematerialize(LI->reg, ValNo, DefMI, RestorePt, RefsInMBB)) {
      DEBUG(errs() << "success (remat).\n");
      return true;
    }

  // Add a spill either before the barrier or after the definition.
  MachineBasicBlock *DefMBB = DefMI ? DefMI->getParent() : NULL;
  const TargetRegisterClass *RC = MRI->getRegClass(CurrLI->reg);
  SlotIndex SpillIndex;
  MachineInstr *SpillMI = NULL;
  int SS = -1;
  if (!ValNo->isDefAccurate()) {
    // If we don't know where the def is we must split just before the barrier.
    if ((SpillMI = FoldSpill(LI->reg, RC, 0, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
      SpillIndex = LIs->getInstructionIndex(SpillMI);
    } else {
      MachineBasicBlock::iterator SpillPt =
        findSpillPoint(BarrierMBB, Barrier, NULL, RefsInMBB);
      if (SpillPt == BarrierMBB->begin()) {
        DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
        return false; // No gap to insert spill.
      }
      // Add spill.
      SS = CreateSpillStackSlot(CurrLI->reg, RC);
      TII->storeRegToStackSlot(*BarrierMBB, SpillPt, CurrLI->reg, true, SS, RC);
      SpillMI = prior(SpillPt);
      SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
    }
  } else if (!IsAvailableInStack(DefMBB, CurrLI->reg, ValNo->def,
                                 LIs->getZeroIndex(), SpillIndex, SS)) {
    // If it's already split, just restore the value. There is no need to spill
    // the def again.
    if (!DefMI) {
      DEBUG(errs() << "FAILED (def is dead).\n");
      return false; // Def is dead. Do nothing.
    }

    if ((SpillMI = FoldSpill(LI->reg, RC, DefMI, Barrier,
                             BarrierMBB, SS, RefsInMBB))) {
      SpillIndex = LIs->getInstructionIndex(SpillMI);
    } else {
      // Check if it's possible to insert a spill after the def MI.
      MachineBasicBlock::iterator SpillPt;
      if (DefMBB == BarrierMBB) {
        // Add spill after the def and the last use before the barrier.
        SpillPt = findSpillPoint(BarrierMBB, Barrier, DefMI,
                                 RefsInMBB);
        if (SpillPt == DefMBB->begin()) {
          DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
          return false; // No gap to insert spill.
        }
      } else {
        SpillPt = llvm::next(MachineBasicBlock::iterator(DefMI));
        if (SpillPt == DefMBB->end()) {
          DEBUG(errs() << "FAILED (could not find a suitable spill point).\n");
          return false; // No gap to insert spill.
        }
      }
      // Add spill.
      SS = CreateSpillStackSlot(CurrLI->reg, RC);
      TII->storeRegToStackSlot(*DefMBB, SpillPt, CurrLI->reg, false, SS, RC);
      SpillMI = prior(SpillPt);
      SpillIndex = LIs->InsertMachineInstrInMaps(SpillMI);
    }
  }

  // Remember def instruction index to spill index mapping.
  if (DefMI && SpillMI)
    Def2SpillMap[ValNo->def] = SpillIndex;

  // Add restore.
  bool FoldedRestore = false;
  SlotIndex RestoreIndex;
  if (MachineInstr* LMI = FoldRestore(CurrLI->reg, RC, Barrier,
                                      BarrierMBB, SS, RefsInMBB)) {
    RestorePt = LMI;
    RestoreIndex = LIs->getInstructionIndex(RestorePt);
    FoldedRestore = true;
  } else {
    TII->loadRegFromStackSlot(*BarrierMBB, RestorePt, CurrLI->reg, SS, RC);
    MachineInstr *LoadMI = prior(RestorePt);
    RestoreIndex = LIs->InsertMachineInstrInMaps(LoadMI);
  }
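
  // The stack slot becomes live just after the store and dies again at the
  // def slot of the reload, hence the index adjustments below.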
  // Update spill stack slot live interval.
  UpdateSpillSlotInterval(ValNo, SpillIndex.getUseIndex().getNextSlot(),
                          RestoreIndex.getDefIndex());

  ReconstructLiveInterval(CurrLI);

  if (!FoldedRestore) {
    SlotIndex RestoreIdx = LIs->getInstructionIndex(prior(RestorePt));
    RestoreIdx = RestoreIdx.getDefIndex();
    RenumberValno(CurrLI->findDefinedVNInfoForRegInt(RestoreIdx));
  }

  ++NumSplits;
  DEBUG(errs() << "success.\n");
  return true;
}
/// SplitRegLiveIntervals - Split all register live intervals that cross the
/// barrier that's being processed.
bool
PreAllocSplitting::SplitRegLiveIntervals(const TargetRegisterClass **RCs,
                                         SmallPtrSet<LiveInterval*, 8>& Split) {
  // First find all the virtual registers whose live intervals are intercepted
  // by the current barrier.
  SmallVector<LiveInterval*, 8> Intervals;
  for (const TargetRegisterClass **RC = RCs; *RC; ++RC) {
    // FIXME: If it's not safe to move any instruction that defines the barrier
    // register class, then it means there are some special dependencies which
    // codegen is not modelling. Ignore these barriers for now.
    if (!TII->isSafeToMoveRegClassDefs(*RC))
      continue;
    std::vector<unsigned> &VRs = MRI->getRegClassVirtRegs(*RC);
    for (unsigned i = 0, e = VRs.size(); i != e; ++i) {
      unsigned Reg = VRs[i];
      if (!LIs->hasInterval(Reg))
        continue;
      LiveInterval *LI = &LIs->getInterval(Reg);
      if (LI->liveAt(BarrierIdx) && !Barrier->readsRegister(Reg))
        // Virtual register live interval is intercepted by the barrier. We
        // should split and shrink wrap its interval if possible.
        Intervals.push_back(LI);
    }
  }

  // Process the affected live intervals.
  bool Change = false;
  while (!Intervals.empty()) {
    if (PreSplitLimit != -1 && (int)NumSplits == PreSplitLimit)
      break;
    LiveInterval *LI = Intervals.back();
    Intervals.pop_back();
    bool result = SplitRegLiveInterval(LI);
    if (result) Split.insert(LI);
    Change |= result;
  }

  return Change;
}
unsigned PreAllocSplitting::getNumberOfNonSpills(
                                  SmallPtrSet<MachineInstr*, 4>& MIs,
                                  unsigned Reg, int FrameIndex,
                                  bool& FeedsTwoAddr) {
  unsigned NonSpills = 0;
  for (SmallPtrSet<MachineInstr*, 4>::iterator UI = MIs.begin(), UE = MIs.end();
       UI != UE; ++UI) {
    int StoreFrameIndex;
    unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
    if (StoreVReg != Reg || StoreFrameIndex != FrameIndex)
      ++NonSpills;

    int DefIdx = (*UI)->findRegisterDefOperandIdx(Reg);
    if (DefIdx != -1 && (*UI)->isRegTiedToUseOperand(DefIdx))
      FeedsTwoAddr = true;
  }

  return NonSpills;
}
/// removeDeadSpills - After doing splitting, filter through all intervals we've
/// split, and see if any of the spills are unnecessary. If so, remove them.
bool PreAllocSplitting::removeDeadSpills(SmallPtrSet<LiveInterval*, 8>& split) {
  bool changed = false;

  // Walk over all of the live intervals that were touched by the splitter,
  // and see if we can do any DCE and/or folding.
  for (SmallPtrSet<LiveInterval*, 8>::iterator LI = split.begin(),
       LE = split.end(); LI != LE; ++LI) {
    DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> > VNUseCount;

    // First, collect all the uses of the vreg, and sort them by their
    // reaching definition (VNInfo).
    for (MachineRegisterInfo::use_iterator UI = MRI->use_begin((*LI)->reg),
         UE = MRI->use_end(); UI != UE; ++UI) {
      SlotIndex index = LIs->getInstructionIndex(&*UI);
      index = index.getUseIndex();

      const LiveRange* LR = (*LI)->getLiveRangeContaining(index);
      VNUseCount[LR->valno].insert(&*UI);
    }

    // Now, take the definitions (VNInfo's) one at a time and try to DCE
    // and/or fold them away.
    for (LiveInterval::vni_iterator VI = (*LI)->vni_begin(),
         VE = (*LI)->vni_end(); VI != VE; ++VI) {

      if (DeadSplitLimit != -1 && (int)NumDeadSpills == DeadSplitLimit)
        break;

      VNInfo* CurrVN = *VI;

      // We don't currently try to handle definitions with PHI kills, because
      // it would involve processing more than one VNInfo at once.
      if (CurrVN->hasPHIKill()) continue;

      // We also don't try to handle the results of PHI joins, since there's
      // no defining instruction to analyze.
      if (!CurrVN->isDefAccurate() || CurrVN->isUnused()) continue;

      // We're only interested in eliminating cruft introduced by the splitter,
      // which is of the form load-use or load-use-store. First, check that the
      // definition is a load, and remember what stack slot we loaded it from.
      MachineInstr* DefMI = LIs->getInstructionFromIndex(CurrVN->def);
      int FrameIndex;
      if (!TII->isLoadFromStackSlot(DefMI, FrameIndex)) continue;

      // If the definition has no uses at all, just DCE it.
      if (VNUseCount[CurrVN].size() == 0) {
        LIs->RemoveMachineInstrFromMaps(DefMI);
        (*LI)->removeValNo(CurrVN);
        DefMI->eraseFromParent();
        VNUseCount.erase(CurrVN);
        ++NumDeadSpills;
        changed = true;
        continue;
      }

      // Second, get the number of non-store uses of the definition, as well as
      // a flag indicating whether it feeds into a later two-address definition.
      bool FeedsTwoAddr = false;
      unsigned NonSpillCount = getNumberOfNonSpills(VNUseCount[CurrVN],
                                                    (*LI)->reg, FrameIndex,
                                                    FeedsTwoAddr);

      // If there's one non-store use and it doesn't feed a two-addr, then
      // this is a load-use-store case that we can try to fold.
      if (NonSpillCount == 1 && !FeedsTwoAddr) {
        // Start by finding the non-store use MachineInstr.
        SmallPtrSet<MachineInstr*, 4>::iterator UI = VNUseCount[CurrVN].begin();
        int StoreFrameIndex;
        unsigned StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
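        // Step past uses that are stores of this vreg back into the same
        // stack slot; the first use that is not such a store is our candidate.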
        while (UI != VNUseCount[CurrVN].end() &&
               (StoreVReg == (*LI)->reg && StoreFrameIndex == FrameIndex)) {
          ++UI;
          if (UI != VNUseCount[CurrVN].end())
            StoreVReg = TII->isStoreToStackSlot(*UI, StoreFrameIndex);
        }
        if (UI == VNUseCount[CurrVN].end()) continue;

        MachineInstr* use = *UI;

        // Attempt to fold it away!
        int OpIdx = use->findRegisterUseOperandIdx((*LI)->reg, false);
        if (OpIdx == -1) continue;
        SmallVector<unsigned, 1> Ops;
        Ops.push_back(OpIdx);
        if (!TII->canFoldMemoryOperand(use, Ops)) continue;

        MachineInstr* NewMI =
          TII->foldMemoryOperand(*use->getParent()->getParent(),
                                 use, Ops, FrameIndex);

        if (!NewMI) continue;

        // Update relevant analyses.
        LIs->RemoveMachineInstrFromMaps(DefMI);
        LIs->ReplaceMachineInstrInMaps(use, NewMI);
        (*LI)->removeValNo(CurrVN);

        DefMI->eraseFromParent();
        MachineBasicBlock* MBB = use->getParent();
        NewMI = MBB->insert(MBB->erase(use), NewMI);
        VNUseCount[CurrVN].erase(use);

        // Remove deleted instructions. Note that we need to remove them from
        // the VNInfo->use map as well, just to be safe.
        for (SmallPtrSet<MachineInstr*, 4>::iterator II =
             VNUseCount[CurrVN].begin(), IE = VNUseCount[CurrVN].end();
             II != IE; ++II) {
          for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
               VNI = VNUseCount.begin(), VNE = VNUseCount.end(); VNI != VNE;
               ++VNI)
            if (VNI->first != CurrVN)
              VNI->second.erase(*II);
          LIs->RemoveMachineInstrFromMaps(*II);
          (*II)->eraseFromParent();
        }

        VNUseCount.erase(CurrVN);

        for (DenseMap<VNInfo*, SmallPtrSet<MachineInstr*, 4> >::iterator
             VI = VNUseCount.begin(), VE = VNUseCount.end(); VI != VE; ++VI)
          if (VI->second.erase(use))
            VI->second.insert(NewMI);

        ++NumDeadSpills;
        changed = true;
        continue;
      }

      // If there's more than one non-store instruction, we can't profitably
      // fold it, so bail.
      if (NonSpillCount) continue;

      // Otherwise, this is a load-store case, so DCE them.
      for (SmallPtrSet<MachineInstr*, 4>::iterator UI =
           VNUseCount[CurrVN].begin(), UE = VNUseCount[CurrVN].end();
           UI != UE; ++UI) {
        LIs->RemoveMachineInstrFromMaps(*UI);
        (*UI)->eraseFromParent();
      }

      VNUseCount.erase(CurrVN);

      LIs->RemoveMachineInstrFromMaps(DefMI);
      (*LI)->removeValNo(CurrVN);
      DefMI->eraseFromParent();
      ++NumDeadSpills;
      changed = true;
    }
  }

  return changed;
}
bool PreAllocSplitting::createsNewJoin(LiveRange* LR,
                                       MachineBasicBlock* DefMBB,
                                       MachineBasicBlock* BarrierMBB) {
  if (DefMBB == BarrierMBB)
    return false;

  if (LR->valno->hasPHIKill())
    return false;

  SlotIndex MBBEnd = LIs->getMBBEndIdx(BarrierMBB);
  if (LR->end < MBBEnd)
    return false;
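
  // Be conservative when the def and the barrier sit in different loops;
  // moving the value across a loop boundary is treated as creating a join.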
  MachineLoopInfo& MLI = getAnalysis<MachineLoopInfo>();
  if (MLI.getLoopFor(DefMBB) != MLI.getLoopFor(BarrierMBB))
    return true;

  MachineDominatorTree& MDT = getAnalysis<MachineDominatorTree>();
  SmallPtrSet<MachineBasicBlock*, 4> Visited;
  typedef std::pair<MachineBasicBlock*,
                    MachineBasicBlock::succ_iterator> ItPair;
  SmallVector<ItPair, 4> Stack;
  Stack.push_back(std::make_pair(BarrierMBB, BarrierMBB->succ_begin()));
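
  // Depth-first search over the blocks the value stays live across: splitting
  // would create a new join if we can reach the barrier block again, or reach
  // a block whose dominator chain hits the def block before the barrier block.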
  while (!Stack.empty()) {
    ItPair P = Stack.back();
    Stack.pop_back();

    MachineBasicBlock* PredMBB = P.first;
    MachineBasicBlock::succ_iterator S = P.second;

    if (S == PredMBB->succ_end())
      continue;
    else if (Visited.count(*S)) {
      Stack.push_back(std::make_pair(PredMBB, ++S));
      continue;
    } else
      Stack.push_back(std::make_pair(PredMBB, S+1));

    MachineBasicBlock* MBB = *S;
    Visited.insert(MBB);

    if (MBB == BarrierMBB)
      return true;

    MachineDomTreeNode* DefMDTN = MDT.getNode(DefMBB);
    MachineDomTreeNode* BarrierMDTN = MDT.getNode(BarrierMBB);
    MachineDomTreeNode* MDTN = MDT.getNode(MBB)->getIDom();
    while (MDTN) {
      if (MDTN == DefMDTN)
        return true;
      else if (MDTN == BarrierMDTN)
        break;
      MDTN = MDTN->getIDom();
    }

    MBBEnd = LIs->getMBBEndIdx(MBB);
    if (LR->end > MBBEnd)
      Stack.push_back(std::make_pair(MBB, MBB->succ_begin()));
  }

  return false;
}
bool PreAllocSplitting::runOnMachineFunction(MachineFunction &MF) {
  CurrMF = &MF;
  TM     = &MF.getTarget();
  TRI    = TM->getRegisterInfo();
  TII    = TM->getInstrInfo();
  MFI    = MF.getFrameInfo();
  MRI    = &MF.getRegInfo();
  SIs    = &getAnalysis<SlotIndexes>();
  LIs    = &getAnalysis<LiveIntervals>();
  LSs    = &getAnalysis<LiveStacks>();
  VRM    = &getAnalysis<VirtRegMap>();

  bool MadeChange = false;

  // Make sure blocks are numbered in order.
  MF.RenumberBlocks();

  MachineBasicBlock *Entry = MF.begin();
  SmallPtrSet<MachineBasicBlock*,16> Visited;

  SmallPtrSet<LiveInterval*, 8> Split;

  for (df_ext_iterator<MachineBasicBlock*, SmallPtrSet<MachineBasicBlock*,16> >
         DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
       DFI != E; ++DFI) {
    BarrierMBB = *DFI;
    for (MachineBasicBlock::iterator I = BarrierMBB->begin(),
           E = BarrierMBB->end(); I != E; ++I) {
      Barrier = &*I;
      const TargetRegisterClass **BarrierRCs =
        Barrier->getDesc().getRegClassBarriers();
      if (!BarrierRCs)
        continue;
      BarrierIdx = LIs->getInstructionIndex(Barrier);
      MadeChange |= SplitRegLiveIntervals(BarrierRCs, Split);
    }
  }

  MadeChange |= removeDeadSpills(Split);

  return MadeChange;
}