1 //===-- LiveIntervalAnalysis.cpp - Live Interval Analysis -----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the LiveInterval analysis pass which is used
11 // by the Linear Scan Register allocator. This pass linearizes the
12 // basic blocks of the function in DFS order and uses the
13 // LiveVariables pass to conservatively compute live intervals for
14 // each virtual and physical register.
16 //===----------------------------------------------------------------------===//
18 #define DEBUG_TYPE "liveintervals"
19 #include "llvm/CodeGen/LiveIntervalAnalysis.h"
20 #include "VirtRegMap.h"
21 #include "llvm/Value.h"
22 #include "llvm/CodeGen/LiveVariables.h"
23 #include "llvm/CodeGen/MachineFrameInfo.h"
24 #include "llvm/CodeGen/MachineInstr.h"
25 #include "llvm/CodeGen/MachineLoopInfo.h"
26 #include "llvm/CodeGen/MachineRegisterInfo.h"
27 #include "llvm/CodeGen/Passes.h"
28 #include "llvm/Target/TargetRegisterInfo.h"
29 #include "llvm/Target/TargetInstrInfo.h"
30 #include "llvm/Target/TargetMachine.h"
31 #include "llvm/Support/CommandLine.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/ADT/Statistic.h"
34 #include "llvm/ADT/STLExtras.h"
40 // Hidden options for help debugging.
// These cl::opt flags are cl::Hidden: they do not appear in normal -help
// output and exist only to aid debugging of the spiller/splitter.
41 cl::opt<bool> DisableReMat("disable-rematerialization",
42 cl::init(false), cl::Hidden);
// Interval splitting at basic-block boundaries is on by default.
44 cl::opt<bool> SplitAtBB("split-intervals-at-bb",
45 cl::init(true), cl::Hidden);
// -1 means no limit on the number of splits performed.
46 cl::opt<int> SplitLimit("split-limit",
47 cl::init(-1), cl::Hidden);
// Statistics counters reported with -stats.
50 STATISTIC(numIntervals, "Number of original intervals");
51 STATISTIC(numIntervalsAfter, "Number of intervals after coalescing");
52 STATISTIC(numFolds , "Number of loads/stores folded into instructions");
53 STATISTIC(numSplits , "Number of intervals split");
// Pass ID and registration under the name "liveintervals".
55 char LiveIntervals::ID = 0;
57 RegisterPass<LiveIntervals> X("liveintervals", "Live Interval Analysis");
// getAnalysisUsage - Declare the passes this analysis requires and the
// analyses it preserves. LiveVariables is both required and preserved;
// PHI elimination and two-address lowering must have run first.
60 void LiveIntervals::getAnalysisUsage(AnalysisUsage &AU) const {
61 AU.addPreserved<LiveVariables>();
62 AU.addRequired<LiveVariables>();
63 AU.addPreservedID(MachineLoopInfoID);
64 AU.addPreservedID(MachineDominatorsID);
65 AU.addPreservedID(PHIEliminationID);
66 AU.addRequiredID(PHIEliminationID);
67 AU.addRequiredID(TwoAddressInstructionPassID);
68 MachineFunctionPass::getAnalysisUsage(AU);
// releaseMemory - Drop all per-function state between runs of the pass.
71 void LiveIntervals::releaseMemory() {
76 // Release VNInfo memory regions after all VNInfo objects are dtor'd.
77 VNInfoAllocator.Reset();
// Free any MachineInstrs cloned for rematerialization bookkeeping.
78 for (unsigned i = 0, e = ClonedMIs.size(); i != e; ++i)
82 /// runOnMachineFunction - Register allocate the whole function
// Caches target hooks, assigns a slot index to every instruction
// (InstrSlots::NUM slots per instruction), records per-MBB index ranges,
// then computes the live intervals and prints them under -debug.
84 bool LiveIntervals::runOnMachineFunction(MachineFunction &fn) {
86 tm_ = &fn.getTarget();
87 tri_ = tm_->getRegisterInfo();
88 tii_ = tm_->getInstrInfo();
89 lv_ = &getAnalysis<LiveVariables>();
90 allocatableRegs_ = tri_->getAllocatableSet(fn);
92 // Number MachineInstrs and MachineBasicBlocks.
93 // Initialize MBB indexes to a sentinel.
94 MBB2IdxMap.resize(mf_->getNumBlockIDs(), std::make_pair(~0U,~0U));
97 for (MachineFunction::iterator MBB = mf_->begin(), E = mf_->end();
99 unsigned StartIdx = MIIndex;
// Map each instruction to its index and back (mi2iMap_/i2miMap_).
101 for (MachineBasicBlock::iterator I = MBB->begin(), E = MBB->end();
103 bool inserted = mi2iMap_.insert(std::make_pair(I, MIIndex)).second;
104 assert(inserted && "multiple MachineInstr -> index mappings");
105 i2miMap_.push_back(I);
106 MIIndex += InstrSlots::NUM;
109 // Set the MBB2IdxMap entry for this MBB.
110 MBB2IdxMap[MBB->getNumber()] = std::make_pair(StartIdx, MIIndex - 1);
111 Idx2MBBMap.push_back(std::make_pair(StartIdx, MBB));
// Keep Idx2MBBMap sorted by start index so it can be binary-searched.
113 std::sort(Idx2MBBMap.begin(), Idx2MBBMap.end(), Idx2MBBCompare());
117 numIntervals += getNumIntervals();
119 DOUT << "********** INTERVALS **********\n";
120 for (iterator I = begin(), E = end(); I != E; ++I) {
121 I->second.print(DOUT, tri_);
125 numIntervalsAfter += getNumIntervals();
130 /// print - Implement the dump method.
// Dumps every live interval followed by every machine instruction with
// its slot index, grouped by basic block.
131 void LiveIntervals::print(std::ostream &O, const Module* ) const {
132 O << "********** INTERVALS **********\n";
133 for (const_iterator I = begin(), E = end(); I != E; ++I) {
134 I->second.print(DOUT, tri_);
138 O << "********** MACHINEINSTRS **********\n";
139 for (MachineFunction::iterator mbbi = mf_->begin(), mbbe = mf_->end();
140 mbbi != mbbe; ++mbbi) {
141 O << ((Value*)mbbi->getBasicBlock())->getName() << ":\n";
142 for (MachineBasicBlock::iterator mii = mbbi->begin(),
143 mie = mbbi->end(); mii != mie; ++mii) {
144 O << getInstructionIndex(mii) << '\t' << *mii;
149 /// conflictsWithPhysRegDef - Returns true if the specified register
150 /// is defined during the duration of the specified interval.
// Walks every instruction covered by li's ranges and checks whether any
// operand is (or is mapped by the VirtRegMap to) a physreg overlapping
// 'reg'. Copies to/from li.reg itself are not treated as conflicts.
151 bool LiveIntervals::conflictsWithPhysRegDef(const LiveInterval &li,
152 VirtRegMap &vrm, unsigned reg) {
153 for (LiveInterval::Ranges::const_iterator
154 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
// Iterate over the base index of each instruction in the range.
155 for (unsigned index = getBaseIndex(I->start),
156 end = getBaseIndex(I->end-1) + InstrSlots::NUM; index != end;
157 index += InstrSlots::NUM) {
158 // skip deleted instructions
159 while (index != end && !getInstructionFromIndex(index))
160 index += InstrSlots::NUM;
161 if (index == end) break;
163 MachineInstr *MI = getInstructionFromIndex(index);
164 unsigned SrcReg, DstReg;
// A copy involving li.reg itself is coalescable, not a conflict.
165 if (tii_->isMoveInstr(*MI, SrcReg, DstReg))
166 if (SrcReg == li.reg || DstReg == li.reg)
168 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
169 MachineOperand& mop = MI->getOperand(i);
170 if (!mop.isRegister())
172 unsigned PhysReg = mop.getReg();
173 if (PhysReg == 0 || PhysReg == li.reg)
// Virtual operands conflict through their assigned physreg, if any.
175 if (TargetRegisterInfo::isVirtualRegister(PhysReg)) {
176 if (!vrm.hasPhys(PhysReg))
178 PhysReg = vrm.getPhys(PhysReg);
180 if (PhysReg && tri_->regsOverlap(PhysReg, reg))
// printRegName - Debug helper: print a physreg by its target name,
// otherwise print a virtual register as %reg<N>.
189 void LiveIntervals::printRegName(unsigned reg) const {
190 if (TargetRegisterInfo::isPhysicalRegister(reg))
191 cerr << tri_->getName(reg);
193 cerr << "%reg" << reg;
// handleVirtualRegisterDef - Extend 'interval' for a def of a virtual
// register by instruction 'mi' (at index MIIdx) in block 'mbb'.
// Handles three cases: the first def of the vreg, a redef introduced by
// two-address lowering, and a redef introduced by PHI elimination.
196 void LiveIntervals::handleVirtualRegisterDef(MachineBasicBlock *mbb,
197 MachineBasicBlock::iterator mi,
199 LiveInterval &interval) {
200 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
201 LiveVariables::VarInfo& vi = lv_->getVarInfo(interval.reg);
203 // Virtual registers may be defined multiple times (due to phi
204 // elimination and 2-addr elimination). Much of what we do only has to be
205 // done once for the vreg. We use an empty interval to detect the first
206 // time we see a vreg.
207 if (interval.empty()) {
208 // Get the Idx of the defining instructions.
209 unsigned defIndex = getDefIndex(MIIdx);
// Remember the defining copy instruction (if any) on the value number
// so coalescing can later identify the copy source.
211 MachineInstr *CopyMI = NULL;
212 unsigned SrcReg, DstReg;
213 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
214 tii_->isMoveInstr(*mi, SrcReg, DstReg))
216 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
218 assert(ValNo->id == 0 && "First value in interval is not 0?");
220 // Loop over all of the blocks that the vreg is defined in. There are
221 // two cases we have to handle here. The most common case is a vreg
222 // whose lifetime is contained within a basic block. In this case there
223 // will be a single kill, in MBB, which comes after the definition.
224 if (vi.Kills.size() == 1 && vi.Kills[0]->getParent() == mbb) {
225 // FIXME: what about dead vars?
// If the single kill is this same instruction, the value is dead at
// the def slot; otherwise it lives to just past the killing use.
227 if (vi.Kills[0] != mi)
228 killIdx = getUseIndex(getInstructionIndex(vi.Kills[0]))+1;
230 killIdx = defIndex+1;
232 // If the kill happens after the definition, we have an intra-block
234 if (killIdx > defIndex) {
235 assert(vi.AliveBlocks.none() &&
236 "Shouldn't be alive across any blocks!");
237 LiveRange LR(defIndex, killIdx, ValNo);
238 interval.addRange(LR);
239 DOUT << " +" << LR << "\n";
240 interval.addKill(ValNo, killIdx);
245 // The other case we handle is when a virtual register lives to the end
246 // of the defining block, potentially live across some blocks, then is
247 // live into some number of blocks, but gets killed. Start by adding a
248 // range that goes from this definition to the end of the defining block.
249 LiveRange NewLR(defIndex,
250 getInstructionIndex(&mbb->back()) + InstrSlots::NUM,
252 DOUT << " +" << NewLR;
253 interval.addRange(NewLR);
255 // Iterate over all of the blocks that the variable is completely
256 // live in, adding [instrIndex(begin), instrIndex(end)+4) to the
258 for (unsigned i = 0, e = vi.AliveBlocks.size(); i != e; ++i) {
259 if (vi.AliveBlocks[i]) {
260 MachineBasicBlock *MBB = mf_->getBlockNumbered(i);
262 LiveRange LR(getMBBStartIdx(i),
263 getInstructionIndex(&MBB->back()) + InstrSlots::NUM,
265 interval.addRange(LR);
271 // Finally, this virtual register is live from the start of any killing
272 // block to the 'use' slot of the killing instruction.
273 for (unsigned i = 0, e = vi.Kills.size(); i != e; ++i) {
274 MachineInstr *Kill = vi.Kills[i];
275 unsigned killIdx = getUseIndex(getInstructionIndex(Kill))+1;
276 LiveRange LR(getMBBStartIdx(Kill->getParent()),
278 interval.addRange(LR);
279 interval.addKill(ValNo, killIdx);
284 // If this is the second time we see a virtual register definition, it
285 // must be due to phi elimination or two addr elimination. If this is
286 // the result of two address elimination, then the vreg is one of the
287 // def-and-use register operand.
288 if (mi->isRegReDefinedByTwoAddr(interval.reg)) {
289 // If this is a two-address definition, then we have already processed
290 // the live range. The only problem is that we didn't realize there
291 // are actually two values in the live interval. Because of this we
292 // need to take the LiveRegion that defines this register and split it
294 assert(interval.containsOneValue());
295 unsigned DefIndex = getDefIndex(interval.getValNumInfo(0)->def);
296 unsigned RedefIndex = getDefIndex(MIIdx);
298 const LiveRange *OldLR = interval.getLiveRangeContaining(RedefIndex-1);
299 VNInfo *OldValNo = OldLR->valno;
301 // Delete the initial value, which should be short and continuous,
302 // because the 2-addr copy must be in the same MBB as the redef.
303 interval.removeRange(DefIndex, RedefIndex);
305 // Two-address vregs should always only be redefined once. This means
306 // that at this point, there should be exactly one value number in it.
307 assert(interval.containsOneValue() && "Unexpected 2-addr liveint!");
309 // The new value number (#1) is defined by the instruction we claimed
311 VNInfo *ValNo = interval.getNextValue(OldValNo->def, OldValNo->copy,
314 // Value#0 is now defined by the 2-addr instruction.
315 OldValNo->def = RedefIndex;
318 // Add the new live interval which replaces the range for the input copy.
319 LiveRange LR(DefIndex, RedefIndex, ValNo);
320 DOUT << " replace range with " << LR;
321 interval.addRange(LR);
322 interval.addKill(ValNo, RedefIndex);
324 // If this redefinition is dead, we need to add a dummy unit live
325 // range covering the def slot.
326 if (lv_->RegisterDefIsDead(mi, interval.reg))
327 interval.addRange(LiveRange(RedefIndex, RedefIndex+1, OldValNo));
330 interval.print(DOUT, tri_);
333 // Otherwise, this must be because of phi elimination. If this is the
334 // first redefinition of the vreg that we have seen, go back and change
335 // the live range in the PHI block to be a different value number.
336 if (interval.containsOneValue()) {
337 assert(vi.Kills.size() == 1 &&
338 "PHI elimination vreg should have one kill, the PHI itself!");
340 // Remove the old range that we now know has an incorrect number.
341 VNInfo *VNI = interval.getValNumInfo(0);
342 MachineInstr *Killer = vi.Kills[0];
343 unsigned Start = getMBBStartIdx(Killer->getParent());
344 unsigned End = getUseIndex(getInstructionIndex(Killer))+1;
345 DOUT << " Removing [" << Start << "," << End << "] from: ";
346 interval.print(DOUT, tri_); DOUT << "\n";
347 interval.removeRange(Start, End);
348 VNI->hasPHIKill = true;
349 DOUT << " RESULT: "; interval.print(DOUT, tri_);
351 // Replace the interval with one of a NEW value number. Note that this
352 // value number isn't actually defined by an instruction, weird huh? :)
353 LiveRange LR(Start, End, interval.getNextValue(~0, 0, VNInfoAllocator));
354 DOUT << " replace range with " << LR;
355 interval.addRange(LR);
356 interval.addKill(LR.valno, End);
357 DOUT << " RESULT: "; interval.print(DOUT, tri_);
360 // In the case of PHI elimination, each variable definition is only
361 // live until the end of the block. We've already taken care of the
362 // rest of the live range.
363 unsigned defIndex = getDefIndex(MIIdx);
// As above, record the copy instruction on the new value number.
366 MachineInstr *CopyMI = NULL;
367 unsigned SrcReg, DstReg;
368 if (mi->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
369 tii_->isMoveInstr(*mi, SrcReg, DstReg))
371 ValNo = interval.getNextValue(defIndex, CopyMI, VNInfoAllocator);
373 unsigned killIndex = getInstructionIndex(&mbb->back()) + InstrSlots::NUM;
374 LiveRange LR(defIndex, killIndex, ValNo);
375 interval.addRange(LR);
376 interval.addKill(ValNo, killIndex);
377 ValNo->hasPHIKill = true;
// handlePhysicalRegisterDef - Extend 'interval' for a def of a physical
// register. A physreg interval never crosses a block boundary, so we
// scan forward within MBB for the kill or redefinition that ends it.
385 void LiveIntervals::handlePhysicalRegisterDef(MachineBasicBlock *MBB,
386 MachineBasicBlock::iterator mi,
388 LiveInterval &interval,
389 MachineInstr *CopyMI) {
390 // A physical register cannot be live across basic block, so its
391 // lifetime must end somewhere in its defining basic block.
392 DOUT << "\t\tregister: "; DEBUG(printRegName(interval.reg));
394 unsigned baseIndex = MIIdx;
395 unsigned start = getDefIndex(baseIndex);
396 unsigned end = start;
398 // If it is not used after definition, it is considered dead at
399 // the instruction defining it. Hence its interval is:
400 // [defSlot(def), defSlot(def)+1)
401 if (lv_->RegisterDefIsDead(mi, interval.reg)) {
403 end = getDefIndex(start) + 1;
407 // If it is not dead on definition, it must be killed by a
408 // subsequent instruction. Hence its interval is:
409 // [defSlot(def), useSlot(kill)+1)
410 while (++mi != MBB->end()) {
411 baseIndex += InstrSlots::NUM;
412 if (lv_->KillsRegister(mi, interval.reg)) {
414 end = getUseIndex(baseIndex) + 1;
416 } else if (lv_->ModifiesRegister(mi, interval.reg)) {
417 // Another instruction redefines the register before it is ever read.
418 // Then the register is essentially dead at the instruction that defines
419 // it. Hence its interval is:
420 // [defSlot(def), defSlot(def)+1)
422 end = getDefIndex(start) + 1;
427 // The only case we should have a dead physreg here without a killing or
428 // instruction where we know it's dead is if it is live-in to the function
430 assert(!CopyMI && "physreg was not killed in defining block!");
431 end = getDefIndex(start) + 1; // It's dead.
434 assert(start < end && "did not find end of interval?");
436 // Already exists? Extend old live interval.
// Reuse the value number of any range already containing 'start' so
// repeated defs of the same physreg share a val#.
437 LiveInterval::iterator OldLR = interval.FindLiveRangeContaining(start);
438 VNInfo *ValNo = (OldLR != interval.end())
439 ? OldLR->valno : interval.getNextValue(start, CopyMI, VNInfoAllocator);
440 LiveRange LR(start, end, ValNo);
441 interval.addRange(LR);
442 interval.addKill(LR.valno, end);
443 DOUT << " +" << LR << '\n';
// handleRegisterDef - Dispatch a register def to the virtual- or
// physical-register handler. Non-allocatable physregs are ignored.
446 void LiveIntervals::handleRegisterDef(MachineBasicBlock *MBB,
447 MachineBasicBlock::iterator MI,
450 if (TargetRegisterInfo::isVirtualRegister(reg))
451 handleVirtualRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg));
452 else if (allocatableRegs_[reg]) {
// Track the defining copy (if any) for later coalescing decisions.
453 MachineInstr *CopyMI = NULL;
454 unsigned SrcReg, DstReg;
455 if (MI->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG ||
456 tii_->isMoveInstr(*MI, SrcReg, DstReg))
458 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(reg), CopyMI);
459 // Def of a register also defines its sub-registers.
460 for (const unsigned* AS = tri_->getSubRegisters(reg); *AS; ++AS)
461 // Avoid processing some defs more than once.
462 if (!MI->findRegisterDefOperand(*AS))
463 handlePhysicalRegisterDef(MBB, MI, MIIdx, getOrCreateInterval(*AS), 0);
// handleLiveInRegister - Build the range for a register that is live-in
// to MBB (starting at index MIIdx), scanning forward for the kill or
// redef that ends it. 'isAlias' marks sub/alias registers of a live-in.
467 void LiveIntervals::handleLiveInRegister(MachineBasicBlock *MBB,
469 LiveInterval &interval, bool isAlias) {
470 DOUT << "\t\tlivein register: "; DEBUG(printRegName(interval.reg));
472 // Look for kills, if it reaches a def before it's killed, then it shouldn't
473 // be considered a livein.
474 MachineBasicBlock::iterator mi = MBB->begin();
475 unsigned baseIndex = MIIdx;
476 unsigned start = baseIndex;
477 unsigned end = start;
478 while (mi != MBB->end()) {
479 if (lv_->KillsRegister(mi, interval.reg)) {
481 end = getUseIndex(baseIndex) + 1;
483 } else if (lv_->ModifiesRegister(mi, interval.reg)) {
484 // Another instruction redefines the register before it is ever read.
485 // Then the register is essentially dead at the instruction that defines
486 // it. Hence its interval is:
487 // [defSlot(def), defSlot(def)+1)
489 end = getDefIndex(start) + 1;
493 baseIndex += InstrSlots::NUM;
498 // Live-in register might not be used at all.
502 end = getDefIndex(MIIdx) + 1;
504 DOUT << " live through";
// The range gets a fresh value number with no defining instruction
// (the def is outside this function's body).
509 LiveRange LR(start, end, interval.getNextValue(start, 0, VNInfoAllocator));
510 interval.addRange(LR);
511 interval.addKill(LR.valno, end);
512 DOUT << " +" << LR << '\n';
515 /// computeIntervals - computes the live intervals for virtual
516 /// registers. for some ordering of the machine instructions [1,N] a
517 /// live interval is an interval [i, j) where 1 <= i <= j < N for
518 /// which a variable is live
519 void LiveIntervals::computeIntervals() {
520 DOUT << "********** COMPUTING LIVE INTERVALS **********\n"
521 << "********** Function: "
522 << ((Value*)mf_->getFunction())->getName() << '\n';
523 // Track the index of the current machine instr.
524 unsigned MIIndex = 0;
525 for (MachineFunction::iterator MBBI = mf_->begin(), E = mf_->end();
527 MachineBasicBlock *MBB = MBBI;
528 DOUT << ((Value*)MBB->getBasicBlock())->getName() << ":\n";
530 MachineBasicBlock::iterator MI = MBB->begin(), miEnd = MBB->end();
532 // Create intervals for live-ins to this BB first.
533 for (MachineBasicBlock::const_livein_iterator LI = MBB->livein_begin(),
534 LE = MBB->livein_end(); LI != LE; ++LI) {
535 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*LI));
536 // Multiple live-ins can alias the same register.
537 for (const unsigned* AS = tri_->getSubRegisters(*LI); *AS; ++AS)
538 if (!hasInterval(*AS))
539 handleLiveInRegister(MBB, MIIndex, getOrCreateInterval(*AS),
// Then process each instruction's register defs in operand order
// (reverse index loop over the operand list).
543 for (; MI != miEnd; ++MI) {
544 DOUT << MIIndex << "\t" << *MI;
547 for (int i = MI->getNumOperands() - 1; i >= 0; --i) {
548 MachineOperand &MO = MI->getOperand(i);
549 // handle register defs - build intervals
550 if (MO.isRegister() && MO.getReg() && MO.isDef())
551 handleRegisterDef(MBB, MI, MIIndex, MO.getReg());
554 MIIndex += InstrSlots::NUM;
// findLiveInMBBs - Collect into MBBs every basic block whose start index
// falls inside the live range LR, using binary search over the sorted
// Idx2MBBMap.
559 bool LiveIntervals::findLiveInMBBs(const LiveRange &LR,
560 SmallVectorImpl<MachineBasicBlock*> &MBBs) const {
561 std::vector<IdxMBBPair>::const_iterator I =
562 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), LR.start);
565 while (I != Idx2MBBMap.end()) {
// Stop once the block starts at or after the end of the range.
566 if (LR.end <= I->first)
568 MBBs.push_back(I->second);
// createInterval - Construct a new LiveInterval for 'reg' with the
// initial spill weight appropriate for a physical vs. virtual register.
576 LiveInterval LiveIntervals::createInterval(unsigned reg) {
577 float Weight = TargetRegisterInfo::isPhysicalRegister(reg) ?
579 return LiveInterval(reg, Weight);
582 /// getVNInfoSourceReg - Helper function that parses the specified VNInfo
583 /// copy field and returns the source register that defines it.
584 unsigned LiveIntervals::getVNInfoSourceReg(const VNInfo *VNI) const {
// EXTRACT_SUBREG takes its source in operand 1.
588 if (VNI->copy->getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)
589 return VNI->copy->getOperand(1).getReg();
590 unsigned SrcReg, DstReg;
591 if (tii_->isMoveInstr(*VNI->copy, SrcReg, DstReg))
593 assert(0 && "Unrecognized copy instruction!");
597 //===----------------------------------------------------------------------===//
598 // Register allocator hooks.
601 /// isReMaterializable - Returns true if the definition MI of the specified
602 /// val# of the specified interval is re-materializable.
603 bool LiveIntervals::isReMaterializable(const LiveInterval &li,
604 const VNInfo *ValNo, MachineInstr *MI,
// Trivially rematerializable instructions (and IMPLICIT_DEF) qualify
// immediately; otherwise only loads from immutable stack slots do.
610 const TargetInstrDesc &TID = MI->getDesc();
611 if (TID.isImplicitDef() || tii_->isTriviallyReMaterializable(MI)) {
612 isLoad = TID.isSimpleLoad();
617 if (!tii_->isLoadFromStackSlot(MI, FrameIdx) ||
618 !mf_->getFrameInfo()->isImmutableObjectIndex(FrameIdx))
621 // This is a load from fixed stack slot. It can be rematerialized unless it's
622 // re-defined by a two-address instruction.
624 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
626 const VNInfo *VNI = *i;
629 unsigned DefIdx = VNI->def;
631 continue; // Dead val#.
632 MachineInstr *DefMI = (DefIdx == ~0u)
633 ? NULL : getInstructionFromIndex(DefIdx);
// A two-address redef of li.reg ties the value to a prior one, which
// defeats rematerialization of the load.
634 if (DefMI && DefMI->isRegReDefinedByTwoAddr(li.reg)) {
642 /// isReMaterializable - Returns true if every definition of MI of every
643 /// val# of the specified interval is re-materializable.
644 bool LiveIntervals::isReMaterializable(const LiveInterval &li, bool &isLoad) {
646 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
648 const VNInfo *VNI = *i;
649 unsigned DefIdx = VNI->def;
651 continue; // Dead val#.
652 // Is the def for the val# rematerializable?
655 MachineInstr *ReMatDefMI = getInstructionFromIndex(DefIdx);
656 bool DefIsLoad = false;
// Delegate the per-val# check to the single-value overload above.
657 if (!ReMatDefMI || !isReMaterializable(li, VNI, ReMatDefMI, DefIsLoad))
664 /// tryFoldMemoryOperand - Attempts to fold either a spill / restore from
665 /// slot / to reg or any rematerialized load into ith operand of specified
666 /// MI. If it is successful, MI is updated with the newly created MI and
668 bool LiveIntervals::tryFoldMemoryOperand(MachineInstr* &MI,
669 VirtRegMap &vrm, MachineInstr *DefMI,
671 SmallVector<unsigned, 2> &Ops,
672 bool isSS, int Slot, unsigned Reg) {
674 const TargetInstrDesc &TID = MI->getDesc();
675 // If it is an implicit def instruction, just delete it.
676 if (TID.isImplicitDef()) {
677 RemoveMachineInstrFromMaps(MI);
678 vrm.RemoveMachineInstrFromMaps(MI);
679 MI->eraseFromParent();
// Collect the foldable operand indices and compute the Mod/Ref kind
// of the access the folded instruction will perform.
684 SmallVector<unsigned, 2> FoldOps;
685 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
686 unsigned OpIdx = Ops[i];
687 // FIXME: fold subreg use.
688 if (MI->getOperand(OpIdx).getSubReg())
690 if (MI->getOperand(OpIdx).isDef())
691 MRInfo |= (unsigned)VirtRegMap::isMod;
693 // Filter out two-address use operand(s).
694 if (TID.getOperandConstraint(OpIdx, TOI::TIED_TO) != -1) {
695 MRInfo = VirtRegMap::isModRef;
698 MRInfo |= (unsigned)VirtRegMap::isRef;
700 FoldOps.push_back(OpIdx);
// Ask the target to fold either the stack slot access or the
// rematerialized def (DefMI) into the instruction.
703 MachineInstr *fmi = isSS ? tii_->foldMemoryOperand(*mf_, MI, FoldOps, Slot)
704 : tii_->foldMemoryOperand(*mf_, MI, FoldOps, DefMI);
706 // Attempt to fold the memory reference into the instruction. If
707 // we can do this, we don't need to insert spill code.
709 lv_->instructionChanged(MI, fmi);
711 fmi->copyKillDeadInfo(MI, tri_);
712 MachineBasicBlock &MBB = *MI->getParent();
713 if (isSS && !mf_->getFrameInfo()->isImmutableObjectIndex(Slot))
714 vrm.virtFolded(Reg, MI, fmi, (VirtRegMap::ModRef)MRInfo);
715 vrm.transferSpillPts(MI, fmi);
716 vrm.transferRestorePts(MI, fmi);
// Rewire the index maps so the new instruction takes over MI's slot,
// then splice it into the block in MI's position.
718 i2miMap_[InstrIdx /InstrSlots::NUM] = fmi;
719 mi2iMap_[fmi] = InstrIdx;
720 MI = MBB.insert(MBB.erase(MI), fmi);
727 /// canFoldMemoryOperand - Returns true if the specified load / store
728 /// folding is possible.
729 bool LiveIntervals::canFoldMemoryOperand(MachineInstr *MI,
730 SmallVector<unsigned, 2> &Ops) const {
// Query-only counterpart of tryFoldMemoryOperand: same operand
// filtering, but defers to the target hook without mutating anything.
731 SmallVector<unsigned, 2> FoldOps;
732 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
733 unsigned OpIdx = Ops[i];
734 // FIXME: fold subreg use.
735 if (MI->getOperand(OpIdx).getSubReg())
737 FoldOps.push_back(OpIdx);
740 return tii_->canFoldMemoryOperand(MI, FoldOps);
// intervalIsInOneMBB - Returns true if all ranges of 'li' fall within a
// single basic block (checked via binary search on Idx2MBBMap).
743 bool LiveIntervals::intervalIsInOneMBB(const LiveInterval &li) const {
744 SmallPtrSet<MachineBasicBlock*, 4> MBBs;
745 for (LiveInterval::Ranges::const_iterator
746 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
747 std::vector<IdxMBBPair>::const_iterator II =
748 std::lower_bound(Idx2MBBMap.begin(), Idx2MBBMap.end(), I->start);
749 if (II == Idx2MBBMap.end())
751 if (I->end > II->first) // crossing a MBB.
753 MBBs.insert(II->second);
760 /// rewriteInstructionForSpills, rewriteInstructionsForSpills - Helper functions
761 /// for addIntervalsForSpills to rewrite uses / defs for the given live range.
// Rewrites one instruction MI that references li.reg: deletes it if it
// is a fully-rematerialized def, otherwise tries to fold the memory
// access, and failing that rewrites the operands to a (possibly new)
// NewVReg and builds the corresponding spill interval ranges.
// Returns whether all the rewritten accesses could be folded.
763 rewriteInstructionForSpills(const LiveInterval &li, bool TrySplit,
764 unsigned id, unsigned index, unsigned end, MachineInstr *MI,
765 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
766 unsigned Slot, int LdSlot,
767 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
768 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
769 const TargetRegisterClass* rc,
770 SmallVector<int, 4> &ReMatIds,
771 unsigned &NewVReg, bool &HasDef, bool &HasUse,
772 const MachineLoopInfo *loopInfo,
773 std::map<unsigned,unsigned> &MBBVRegsMap,
774 std::vector<LiveInterval*> &NewLIs) {
775 bool CanFold = false;
// Scan operands looking for references to li.reg.
777 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
778 MachineOperand& mop = MI->getOperand(i);
779 if (!mop.isRegister())
781 unsigned Reg = mop.getReg();
783 if (Reg == 0 || TargetRegisterInfo::isPhysicalRegister(Reg))
788 bool TryFold = !DefIsReMat;
789 bool FoldSS = true; // Default behavior unless it's a remat.
792 // If this is the rematerializable definition MI itself and
793 // all of its uses are rematerialized, simply delete it.
794 if (MI == ReMatOrigDefMI && CanDelete) {
795 DOUT << "\t\t\t\tErasing re-materlizable def: ";
797 RemoveMachineInstrFromMaps(MI);
798 vrm.RemoveMachineInstrFromMaps(MI);
799 MI->eraseFromParent();
803 // If def for this use can't be rematerialized, then try folding.
804 // If def is rematerializable and it's a load, also try folding.
805 TryFold = !ReMatDefMI || (ReMatDefMI && (MI == ReMatOrigDefMI || isLoad));
807 // Try fold loads (from stack slot, constant pool, etc.) into uses.
813 // Scan all of the operands of this instruction rewriting operands
814 // to use NewVReg instead of li.reg as appropriate. We do this for
817 // 1. If the instr reads the same spilled vreg multiple times, we
818 // want to reuse the NewVReg.
819 // 2. If the instr is a two-addr instruction, we are required to
820 // keep the src/dst regs pinned.
822 // Keep track of whether we replace a use and/or def so that we can
823 // create the spill interval with the appropriate range.
825 HasUse = mop.isUse();
826 HasDef = mop.isDef();
827 SmallVector<unsigned, 2> Ops;
// Gather the remaining operands that reference the same register.
829 for (unsigned j = i+1, e = MI->getNumOperands(); j != e; ++j) {
830 const MachineOperand &MOj = MI->getOperand(j);
831 if (!MOj.isRegister())
833 unsigned RegJ = MOj.getReg();
834 if (RegJ == 0 || TargetRegisterInfo::isPhysicalRegister(RegJ))
838 HasUse |= MOj.isUse();
839 HasDef |= MOj.isDef();
844 // Do not fold load / store here if we are splitting. We'll find an
845 // optimal point to insert a load / store later.
847 if (tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
848 Ops, FoldSS, FoldSlot, Reg)) {
849 // Folding the load/store can completely change the instruction in
850 // unpredictable ways, rescan it from the beginning.
854 goto RestartInstruction;
857 CanFold = canFoldMemoryOperand(MI, Ops);
862 // Create a new virtual register for the spill interval.
863 bool CreatedNewVReg = false;
865 NewVReg = RegInfo.createVirtualRegister(rc);
867 CreatedNewVReg = true;
871 // Reuse NewVReg for other reads.
872 for (unsigned j = 0, e = Ops.size(); j != e; ++j)
873 MI->getOperand(Ops[j]).setReg(NewVReg);
875 if (CreatedNewVReg) {
// Record remat/stack-slot info for the new vreg in the VirtRegMap.
877 vrm.setVirtIsReMaterialized(NewVReg, ReMatDefMI/*, CanDelete*/);
878 if (ReMatIds[id] == VirtRegMap::MAX_STACK_SLOT) {
879 // Each valnum may have its own remat id.
880 ReMatIds[id] = vrm.assignVirtReMatId(NewVReg);
882 vrm.assignVirtReMatId(NewVReg, ReMatIds[id]);
884 if (!CanDelete || (HasUse && HasDef)) {
885 // If this is a two-addr instruction then its use operands are
886 // rematerializable but its def is not. It should be assigned a
888 vrm.assignVirt2StackSlot(NewVReg, Slot);
891 vrm.assignVirt2StackSlot(NewVReg, Slot);
893 } else if (HasUse && HasDef &&
894 vrm.getStackSlot(NewVReg) == VirtRegMap::NO_STACK_SLOT) {
895 // If this interval hasn't been assigned a stack slot (because earlier
896 // def is a deleted remat def), do it now.
897 assert(Slot != VirtRegMap::NO_STACK_SLOT);
898 vrm.assignVirt2StackSlot(NewVReg, Slot);
901 // create a new register interval for this spill / remat.
902 LiveInterval &nI = getOrCreateInterval(NewVReg);
903 if (CreatedNewVReg) {
904 NewLIs.push_back(&nI);
905 MBBVRegsMap.insert(std::make_pair(MI->getParent()->getNumber(), NewVReg));
907 vrm.setIsSplitFromReg(NewVReg, li.reg);
// Build the ranges of the spill interval around this instruction:
// load slot -> use slot for uses, def slot -> store slot for defs.
911 if (CreatedNewVReg) {
912 LiveRange LR(getLoadIndex(index), getUseIndex(index)+1,
913 nI.getNextValue(~0U, 0, VNInfoAllocator));
917 // Extend the split live interval to this def / use.
918 unsigned End = getUseIndex(index)+1;
919 LiveRange LR(nI.ranges[nI.ranges.size()-1].end, End,
920 nI.getValNumInfo(nI.getNumValNums()-1));
926 LiveRange LR(getDefIndex(index), getStoreIndex(index),
927 nI.getNextValue(~0U, 0, VNInfoAllocator));
932 DOUT << "\t\t\t\tAdded new interval: ";
933 nI.print(DOUT, tri_);
// anyKillInMBBAfterIdx - Returns true if any of VNI's recorded kill
// indices falls inside MBB strictly after Idx.
938 bool LiveIntervals::anyKillInMBBAfterIdx(const LiveInterval &li,
940 MachineBasicBlock *MBB, unsigned Idx) const {
941 unsigned End = getMBBEndIdx(MBB);
942 for (unsigned j = 0, ee = VNI->kills.size(); j != ee; ++j) {
943 unsigned KillIdx = VNI->kills[j];
944 if (KillIdx > Idx && KillIdx < End)
// findDefinedVNInfo - Static helper: return the value number of 'li'
// whose def index equals DefIdx, if any.
950 static const VNInfo *findDefinedVNInfo(const LiveInterval &li, unsigned DefIdx) {
951 const VNInfo *VNI = NULL;
952 for (LiveInterval::const_vni_iterator i = li.vni_begin(),
953 e = li.vni_end(); i != e; ++i)
954 if ((*i)->def == DefIdx) {
962 rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit,
963 LiveInterval::Ranges::const_iterator &I,
964 MachineInstr *ReMatOrigDefMI, MachineInstr *ReMatDefMI,
965 unsigned Slot, int LdSlot,
966 bool isLoad, bool isLoadSS, bool DefIsReMat, bool CanDelete,
967 VirtRegMap &vrm, MachineRegisterInfo &RegInfo,
968 const TargetRegisterClass* rc,
969 SmallVector<int, 4> &ReMatIds,
970 const MachineLoopInfo *loopInfo,
971 BitVector &SpillMBBs,
972 std::map<unsigned, std::vector<SRInfo> > &SpillIdxes,
973 BitVector &RestoreMBBs,
974 std::map<unsigned, std::vector<SRInfo> > &RestoreIdxes,
975 std::map<unsigned,unsigned> &MBBVRegsMap,
976 std::vector<LiveInterval*> &NewLIs) {
977 bool AllCanFold = true;
978 unsigned NewVReg = 0;
979 unsigned index = getBaseIndex(I->start);
980 unsigned end = getBaseIndex(I->end-1) + InstrSlots::NUM;
981 for (; index != end; index += InstrSlots::NUM) {
982 // skip deleted instructions
983 while (index != end && !getInstructionFromIndex(index))
984 index += InstrSlots::NUM;
985 if (index == end) break;
987 MachineInstr *MI = getInstructionFromIndex(index);
988 MachineBasicBlock *MBB = MI->getParent();
989 unsigned ThisVReg = 0;
991 std::map<unsigned,unsigned>::const_iterator NVI =
992 MBBVRegsMap.find(MBB->getNumber());
993 if (NVI != MBBVRegsMap.end()) {
994 ThisVReg = NVI->second;
1001 // It's better to start a new interval to avoid artifically
1002 // extend the new interval.
1003 // FIXME: Too slow? Can we fix it after rewriteInstructionsForSpills?
1004 bool MIHasUse = false;
1005 bool MIHasDef = false;
1006 for (unsigned i = 0; i != MI->getNumOperands(); ++i) {
1007 MachineOperand& mop = MI->getOperand(i);
1008 if (!mop.isRegister() || mop.getReg() != li.reg)
1015 if (MIHasDef && !MIHasUse) {
1016 MBBVRegsMap.erase(MBB->getNumber());
1022 bool IsNew = ThisVReg == 0;
1024 // This ends the previous live interval. If all of its def / use
1025 // can be folded, give it a low spill weight.
1026 if (NewVReg && TrySplit && AllCanFold) {
1027 LiveInterval &nI = getOrCreateInterval(NewVReg);
1034 bool HasDef = false;
1035 bool HasUse = false;
1036 bool CanFold = rewriteInstructionForSpills(li, TrySplit, I->valno->id,
1037 index, end, MI, ReMatOrigDefMI, ReMatDefMI,
1038 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1039 CanDelete, vrm, RegInfo, rc, ReMatIds, NewVReg,
1040 HasDef, HasUse, loopInfo, MBBVRegsMap, NewLIs);
1041 if (!HasDef && !HasUse)
1044 AllCanFold &= CanFold;
1046 // Update weight of spill interval.
1047 LiveInterval &nI = getOrCreateInterval(NewVReg);
1049 // The spill weight is now infinity as it cannot be spilled again.
1050 nI.weight = HUGE_VALF;
1054 // Keep track of the last def and first use in each MBB.
1055 unsigned MBBId = MBB->getNumber();
1057 if (MI != ReMatOrigDefMI || !CanDelete) {
1058 bool HasKill = false;
1060 HasKill = anyKillInMBBAfterIdx(li, I->valno, MBB, getDefIndex(index));
1062 // If this is a two-address code, then this index starts a new VNInfo.
1063 const VNInfo *VNI = findDefinedVNInfo(li, getDefIndex(index));
1065 HasKill = anyKillInMBBAfterIdx(li, VNI, MBB, getDefIndex(index));
1067 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1068 SpillIdxes.find(MBBId);
1070 if (SII == SpillIdxes.end()) {
1071 std::vector<SRInfo> S;
1072 S.push_back(SRInfo(index, NewVReg, true));
1073 SpillIdxes.insert(std::make_pair(MBBId, S));
1074 } else if (SII->second.back().vreg != NewVReg) {
1075 SII->second.push_back(SRInfo(index, NewVReg, true));
1076 } else if ((int)index > SII->second.back().index) {
1077 // If there is an earlier def and this is a two-address
1078 // instruction, then it's not possible to fold the store (which
1079 // would also fold the load).
1080 SRInfo &Info = SII->second.back();
1082 Info.canFold = !HasUse;
1084 SpillMBBs.set(MBBId);
1085 } else if (SII != SpillIdxes.end() &&
1086 SII->second.back().vreg == NewVReg &&
1087 (int)index > SII->second.back().index) {
1088 // There is an earlier def that's not killed (must be two-address).
1089 // The spill is no longer needed.
1090 SII->second.pop_back();
1091 if (SII->second.empty()) {
1092 SpillIdxes.erase(MBBId);
1093 SpillMBBs.reset(MBBId);
1100 std::map<unsigned, std::vector<SRInfo> >::iterator SII =
1101 SpillIdxes.find(MBBId);
1102 if (SII != SpillIdxes.end() &&
1103 SII->second.back().vreg == NewVReg &&
1104 (int)index > SII->second.back().index)
1105 // Use(s) following the last def, it's not safe to fold the spill.
1106 SII->second.back().canFold = false;
1107 std::map<unsigned, std::vector<SRInfo> >::iterator RII =
1108 RestoreIdxes.find(MBBId);
1109 if (RII != RestoreIdxes.end() && RII->second.back().vreg == NewVReg)
1110 // If we are splitting live intervals, only fold if it's the first
1111 // use and there isn't another use later in the MBB.
1112 RII->second.back().canFold = false;
1114 // Only need a reload if there isn't an earlier def / use.
1115 if (RII == RestoreIdxes.end()) {
1116 std::vector<SRInfo> Infos;
1117 Infos.push_back(SRInfo(index, NewVReg, true));
1118 RestoreIdxes.insert(std::make_pair(MBBId, Infos));
1120 RII->second.push_back(SRInfo(index, NewVReg, true));
1122 RestoreMBBs.set(MBBId);
1126 // Update spill weight.
1127 unsigned loopDepth = loopInfo->getLoopDepth(MBB);
1128 nI.weight += getSpillWeight(HasDef, HasUse, loopDepth);
1131 if (NewVReg && TrySplit && AllCanFold) {
1132 // If all of its def / use can be folded, give it a low spill weight.
1133 LiveInterval &nI = getOrCreateInterval(NewVReg);
1138 bool LiveIntervals::alsoFoldARestore(int Id, int index, unsigned vr,
1139 BitVector &RestoreMBBs,
1140 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1141 if (!RestoreMBBs[Id])
1143 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1144 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1145 if (Restores[i].index == index &&
1146 Restores[i].vreg == vr &&
1147 Restores[i].canFold)
1152 void LiveIntervals::eraseRestoreInfo(int Id, int index, unsigned vr,
1153 BitVector &RestoreMBBs,
1154 std::map<unsigned,std::vector<SRInfo> > &RestoreIdxes) {
1155 if (!RestoreMBBs[Id])
1157 std::vector<SRInfo> &Restores = RestoreIdxes[Id];
1158 for (unsigned i = 0, e = Restores.size(); i != e; ++i)
1159 if (Restores[i].index == index && Restores[i].vreg)
1160 Restores[i].index = -1;
1164 std::vector<LiveInterval*> LiveIntervals::
1165 addIntervalsForSpills(const LiveInterval &li,
1166 const MachineLoopInfo *loopInfo, VirtRegMap &vrm) {
1167 // Since this is called after the analysis is done we don't know if
1168 // LiveVariables is available
1169 lv_ = getAnalysisToUpdate<LiveVariables>();
1171 assert(li.weight != HUGE_VALF &&
1172 "attempt to spill already spilled interval!");
1174 DOUT << "\t\t\t\tadding intervals for spills for interval: ";
1175 li.print(DOUT, tri_);
1178 // Each bit specify whether it a spill is required in the MBB.
1179 BitVector SpillMBBs(mf_->getNumBlockIDs());
1180 std::map<unsigned, std::vector<SRInfo> > SpillIdxes;
1181 BitVector RestoreMBBs(mf_->getNumBlockIDs());
1182 std::map<unsigned, std::vector<SRInfo> > RestoreIdxes;
1183 std::map<unsigned,unsigned> MBBVRegsMap;
1184 std::vector<LiveInterval*> NewLIs;
1185 MachineRegisterInfo &RegInfo = mf_->getRegInfo();
1186 const TargetRegisterClass* rc = RegInfo.getRegClass(li.reg);
1188 unsigned NumValNums = li.getNumValNums();
1189 SmallVector<MachineInstr*, 4> ReMatDefs;
1190 ReMatDefs.resize(NumValNums, NULL);
1191 SmallVector<MachineInstr*, 4> ReMatOrigDefs;
1192 ReMatOrigDefs.resize(NumValNums, NULL);
1193 SmallVector<int, 4> ReMatIds;
1194 ReMatIds.resize(NumValNums, VirtRegMap::MAX_STACK_SLOT);
1195 BitVector ReMatDelete(NumValNums);
1196 unsigned Slot = VirtRegMap::MAX_STACK_SLOT;
1198 // Spilling a split live interval. It cannot be split any further. Also,
1199 // it's also guaranteed to be a single val# / range interval.
1200 if (vrm.getPreSplitReg(li.reg)) {
1201 vrm.setIsSplitFromReg(li.reg, 0);
1202 // Unset the split kill marker on the last use.
1203 unsigned KillIdx = vrm.getKillPoint(li.reg);
1205 MachineInstr *KillMI = getInstructionFromIndex(KillIdx);
1206 assert(KillMI && "Last use disappeared?");
1207 int KillOp = KillMI->findRegisterUseOperandIdx(li.reg, true);
1208 assert(KillOp != -1 && "Last use disappeared?");
1209 KillMI->getOperand(KillOp).setIsKill(false);
1211 vrm.removeKillPoint(li.reg);
1212 bool DefIsReMat = vrm.isReMaterialized(li.reg);
1213 Slot = vrm.getStackSlot(li.reg);
1214 assert(Slot != VirtRegMap::MAX_STACK_SLOT);
1215 MachineInstr *ReMatDefMI = DefIsReMat ?
1216 vrm.getReMaterializedMI(li.reg) : NULL;
1218 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1219 bool isLoad = isLoadSS ||
1220 (DefIsReMat && (ReMatDefMI->getDesc().isSimpleLoad()));
1221 bool IsFirstRange = true;
1222 for (LiveInterval::Ranges::const_iterator
1223 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1224 // If this is a split live interval with multiple ranges, it means there
1225 // are two-address instructions that re-defined the value. Only the
1226 // first def can be rematerialized!
1228 // Note ReMatOrigDefMI has already been deleted.
1229 rewriteInstructionsForSpills(li, false, I, NULL, ReMatDefMI,
1230 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1231 false, vrm, RegInfo, rc, ReMatIds, loopInfo,
1232 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1233 MBBVRegsMap, NewLIs);
1235 rewriteInstructionsForSpills(li, false, I, NULL, 0,
1236 Slot, 0, false, false, false,
1237 false, vrm, RegInfo, rc, ReMatIds, loopInfo,
1238 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1239 MBBVRegsMap, NewLIs);
1241 IsFirstRange = false;
1246 bool TrySplit = SplitAtBB && !intervalIsInOneMBB(li);
1247 if (SplitLimit != -1 && (int)numSplits >= SplitLimit)
1251 bool NeedStackSlot = false;
1252 for (LiveInterval::const_vni_iterator i = li.vni_begin(), e = li.vni_end();
1254 const VNInfo *VNI = *i;
1255 unsigned VN = VNI->id;
1256 unsigned DefIdx = VNI->def;
1258 continue; // Dead val#.
1259 // Is the def for the val# rematerializable?
1260 MachineInstr *ReMatDefMI = (DefIdx == ~0u)
1261 ? 0 : getInstructionFromIndex(DefIdx);
1263 if (ReMatDefMI && isReMaterializable(li, VNI, ReMatDefMI, dummy)) {
1264 // Remember how to remat the def of this val#.
1265 ReMatOrigDefs[VN] = ReMatDefMI;
1266 // Original def may be modified so we have to make a copy here. vrm must
1268 ReMatDefs[VN] = ReMatDefMI = ReMatDefMI->clone();
1270 bool CanDelete = true;
1271 if (VNI->hasPHIKill) {
1272 // A kill is a phi node, not all of its uses can be rematerialized.
1273 // It must not be deleted.
1275 // Need a stack slot if there is any live range where uses cannot be
1277 NeedStackSlot = true;
1280 ReMatDelete.set(VN);
1282 // Need a stack slot if there is any live range where uses cannot be
1284 NeedStackSlot = true;
1288 // One stack slot per live interval.
1289 if (NeedStackSlot && vrm.getPreSplitReg(li.reg) == 0)
1290 Slot = vrm.assignVirt2StackSlot(li.reg);
1292 // Create new intervals and rewrite defs and uses.
1293 for (LiveInterval::Ranges::const_iterator
1294 I = li.ranges.begin(), E = li.ranges.end(); I != E; ++I) {
1295 MachineInstr *ReMatDefMI = ReMatDefs[I->valno->id];
1296 MachineInstr *ReMatOrigDefMI = ReMatOrigDefs[I->valno->id];
1297 bool DefIsReMat = ReMatDefMI != NULL;
1298 bool CanDelete = ReMatDelete[I->valno->id];
1300 bool isLoadSS = DefIsReMat && tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1301 bool isLoad = isLoadSS ||
1302 (DefIsReMat && ReMatDefMI->getDesc().isSimpleLoad());
1303 rewriteInstructionsForSpills(li, TrySplit, I, ReMatOrigDefMI, ReMatDefMI,
1304 Slot, LdSlot, isLoad, isLoadSS, DefIsReMat,
1305 CanDelete, vrm, RegInfo, rc, ReMatIds, loopInfo,
1306 SpillMBBs, SpillIdxes, RestoreMBBs, RestoreIdxes,
1307 MBBVRegsMap, NewLIs);
1310 // Insert spills / restores if we are splitting.
1314 SmallPtrSet<LiveInterval*, 4> AddedKill;
1315 SmallVector<unsigned, 2> Ops;
1316 if (NeedStackSlot) {
1317 int Id = SpillMBBs.find_first();
1319 std::vector<SRInfo> &spills = SpillIdxes[Id];
1320 for (unsigned i = 0, e = spills.size(); i != e; ++i) {
1321 int index = spills[i].index;
1322 unsigned VReg = spills[i].vreg;
1323 LiveInterval &nI = getOrCreateInterval(VReg);
1324 bool isReMat = vrm.isReMaterialized(VReg);
1325 MachineInstr *MI = getInstructionFromIndex(index);
1326 bool CanFold = false;
1327 bool FoundUse = false;
1329 if (spills[i].canFold) {
1331 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1332 MachineOperand &MO = MI->getOperand(j);
1333 if (!MO.isRegister() || MO.getReg() != VReg)
1340 (!FoundUse && !alsoFoldARestore(Id, index, VReg,
1341 RestoreMBBs, RestoreIdxes))) {
1342 // MI has two-address uses of the same register. If the use
1343 // isn't the first and only use in the BB, then we can't fold
1344 // it. FIXME: Move this to rewriteInstructionsForSpills.
1351 // Fold the store into the def if possible.
1352 bool Folded = false;
1353 if (CanFold && !Ops.empty()) {
1354 if (tryFoldMemoryOperand(MI, vrm, NULL, index, Ops, true, Slot,VReg)){
1357 // Also folded uses, do not issue a load.
1358 eraseRestoreInfo(Id, index, VReg, RestoreMBBs, RestoreIdxes);
1359 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1361 nI.removeRange(getDefIndex(index), getStoreIndex(index));
1365 // Else tell the spiller to issue a spill.
1367 LiveRange *LR = &nI.ranges[nI.ranges.size()-1];
1368 bool isKill = LR->end == getStoreIndex(index);
1369 vrm.addSpillPoint(VReg, isKill, MI);
1371 AddedKill.insert(&nI);
1374 Id = SpillMBBs.find_next(Id);
1378 int Id = RestoreMBBs.find_first();
1380 std::vector<SRInfo> &restores = RestoreIdxes[Id];
1381 for (unsigned i = 0, e = restores.size(); i != e; ++i) {
1382 int index = restores[i].index;
1385 unsigned VReg = restores[i].vreg;
1386 LiveInterval &nI = getOrCreateInterval(VReg);
1387 MachineInstr *MI = getInstructionFromIndex(index);
1388 bool CanFold = false;
1390 if (restores[i].canFold) {
1392 for (unsigned j = 0, ee = MI->getNumOperands(); j != ee; ++j) {
1393 MachineOperand &MO = MI->getOperand(j);
1394 if (!MO.isRegister() || MO.getReg() != VReg)
1398 // If this restore were to be folded, it would have been folded
1407 // Fold the load into the use if possible.
1408 bool Folded = false;
1409 if (CanFold && !Ops.empty()) {
1410 if (!vrm.isReMaterialized(VReg))
1411 Folded = tryFoldMemoryOperand(MI, vrm, NULL,index,Ops,true,Slot,VReg);
1413 MachineInstr *ReMatDefMI = vrm.getReMaterializedMI(VReg);
1415 bool isLoadSS = tii_->isLoadFromStackSlot(ReMatDefMI, LdSlot);
1416 // If the rematerializable def is a load, also try to fold it.
1417 if (isLoadSS || ReMatDefMI->getDesc().isSimpleLoad())
1418 Folded = tryFoldMemoryOperand(MI, vrm, ReMatDefMI, index,
1419 Ops, isLoadSS, LdSlot, VReg);
1422 // If folding is not possible / failed, then tell the spiller to issue a
1423 // load / rematerialization for us.
1425 nI.removeRange(getLoadIndex(index), getUseIndex(index)+1);
1427 vrm.addRestorePoint(VReg, MI);
1429 Id = RestoreMBBs.find_next(Id);
1432 // Finalize intervals: add kills, finalize spill weights, and filter out
1434 std::vector<LiveInterval*> RetNewLIs;
1435 for (unsigned i = 0, e = NewLIs.size(); i != e; ++i) {
1436 LiveInterval *LI = NewLIs[i];
1438 LI->weight /= LI->getSize();
1439 if (!AddedKill.count(LI)) {
1440 LiveRange *LR = &LI->ranges[LI->ranges.size()-1];
1441 unsigned LastUseIdx = getBaseIndex(LR->end);
1442 MachineInstr *LastUse = getInstructionFromIndex(LastUseIdx);
1443 int UseIdx = LastUse->findRegisterUseOperandIdx(LI->reg);
1444 assert(UseIdx != -1);
1445 if (LastUse->getDesc().getOperandConstraint(UseIdx, TOI::TIED_TO) ==
1447 LastUse->getOperand(UseIdx).setIsKill();
1448 vrm.addKillPoint(LI->reg, LastUseIdx);
1451 RetNewLIs.push_back(LI);