1 //===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
3 // The LLVM Compiler Infrastructure
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
8 //===----------------------------------------------------------------------===//
10 // This file implements the VirtRegMap class.
12 // It also contains implementations of the the Spiller interface, which, given a
13 // virtual register map and a machine function, eliminates all virtual
14 // references by replacing them with physical register references - adding spill
17 //===----------------------------------------------------------------------===//
19 #define DEBUG_TYPE "spiller"
20 #include "VirtRegMap.h"
21 #include "llvm/Function.h"
22 #include "llvm/CodeGen/MachineFrameInfo.h"
23 #include "llvm/CodeGen/MachineFunction.h"
24 #include "llvm/CodeGen/MachineInstrBuilder.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/Target/TargetMachine.h"
27 #include "llvm/Target/TargetInstrInfo.h"
28 #include "llvm/Support/CommandLine.h"
29 #include "llvm/Support/Compiler.h"
30 #include "llvm/Support/Debug.h"
31 #include "llvm/ADT/BitVector.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/DepthFirstIterator.h"
34 #include "llvm/ADT/Statistic.h"
35 #include "llvm/ADT/STLExtras.h"
36 #include "llvm/ADT/SmallSet.h"
40 STATISTIC(NumSpills , "Number of register spills");
41 STATISTIC(NumPSpills , "Number of physical register spills");
42 STATISTIC(NumReMats , "Number of re-materialization");
43 STATISTIC(NumDRM , "Number of re-materializable defs elided");
44 STATISTIC(NumStores , "Number of stores added");
45 STATISTIC(NumLoads , "Number of loads added");
46 STATISTIC(NumReused , "Number of values reused");
47 STATISTIC(NumDSE , "Number of dead stores elided");
48 STATISTIC(NumDCE , "Number of copies elided");
49 STATISTIC(NumDSS , "Number of dead spill slots removed");
50 STATISTIC(NumCommutes, "Number of instructions commuted");
51 STATISTIC(NumOmitted , "Number of reloads omited");
52 STATISTIC(NumCopified, "Number of available reloads turned into copies");
55 enum SpillerName { simple, local };
58 static cl::opt<SpillerName>
60 cl::desc("Spiller to use: (default: local)"),
62 cl::values(clEnumVal(simple, "simple spiller"),
63 clEnumVal(local, "local spiller"),
67 //===----------------------------------------------------------------------===//
68 // VirtRegMap implementation
69 //===----------------------------------------------------------------------===//
71 VirtRegMap::VirtRegMap(MachineFunction &mf)
72 : TII(*mf.getTarget().getInstrInfo()), MF(mf),
73 Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
74 Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
75 Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1),
76 LowSpillSlot(NO_STACK_SLOT), HighSpillSlot(NO_STACK_SLOT) {
77 SpillSlotToUsesMap.resize(8);
78 ImplicitDefed.resize(MF.getRegInfo().getLastVirtReg()+1-
79 TargetRegisterInfo::FirstVirtualRegister);
83 void VirtRegMap::grow() {
84 unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
85 Virt2PhysMap.grow(LastVirtReg);
86 Virt2StackSlotMap.grow(LastVirtReg);
87 Virt2ReMatIdMap.grow(LastVirtReg);
88 Virt2SplitMap.grow(LastVirtReg);
89 Virt2SplitKillMap.grow(LastVirtReg);
90 ReMatMap.grow(LastVirtReg);
91 ImplicitDefed.resize(LastVirtReg-TargetRegisterInfo::FirstVirtualRegister+1);
94 int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
95 assert(TargetRegisterInfo::isVirtualRegister(virtReg));
96 assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
97 "attempt to assign stack slot to already spilled register");
98 const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
99 int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
101 if (LowSpillSlot == NO_STACK_SLOT)
103 if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
105 unsigned Idx = SS-LowSpillSlot;
106 while (Idx >= SpillSlotToUsesMap.size())
107 SpillSlotToUsesMap.resize(SpillSlotToUsesMap.size()*2);
108 Virt2StackSlotMap[virtReg] = SS;
113 void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int SS) {
114 assert(TargetRegisterInfo::isVirtualRegister(virtReg));
115 assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
116 "attempt to assign stack slot to already spilled register");
118 (SS >= MF.getFrameInfo()->getObjectIndexBegin())) &&
119 "illegal fixed frame index");
120 Virt2StackSlotMap[virtReg] = SS;
123 int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
124 assert(TargetRegisterInfo::isVirtualRegister(virtReg));
125 assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
126 "attempt to assign re-mat id to already spilled register");
127 Virt2ReMatIdMap[virtReg] = ReMatId;
131 void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
132 assert(TargetRegisterInfo::isVirtualRegister(virtReg));
133 assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
134 "attempt to assign re-mat id to already spilled register");
135 Virt2ReMatIdMap[virtReg] = id;
138 int VirtRegMap::getEmergencySpillSlot(const TargetRegisterClass *RC) {
139 std::map<const TargetRegisterClass*, int>::iterator I =
140 EmergencySpillSlots.find(RC);
141 if (I != EmergencySpillSlots.end())
143 int SS = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
145 if (LowSpillSlot == NO_STACK_SLOT)
147 if (HighSpillSlot == NO_STACK_SLOT || SS > HighSpillSlot)
149 EmergencySpillSlots[RC] = SS;
153 void VirtRegMap::addSpillSlotUse(int FI, MachineInstr *MI) {
154 if (!MF.getFrameInfo()->isFixedObjectIndex(FI)) {
155 // If FI < LowSpillSlot, this stack reference was produced by
156 // instruction selection and is not a spill
157 if (FI >= LowSpillSlot) {
158 assert(FI >= 0 && "Spill slot index should not be negative!");
159 assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
160 && "Invalid spill slot");
161 SpillSlotToUsesMap[FI-LowSpillSlot].insert(MI);
166 void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
167 MachineInstr *NewMI, ModRef MRInfo) {
168 // Move previous memory references folded to new instruction.
169 MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
170 for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
171 E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
172 MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
173 MI2VirtMap.erase(I++);
176 // add new memory reference
177 MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
180 void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
181 MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
182 MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
185 void VirtRegMap::RemoveMachineInstrFromMaps(MachineInstr *MI) {
186 for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
187 MachineOperand &MO = MI->getOperand(i);
190 int FI = MO.getIndex();
191 if (MF.getFrameInfo()->isFixedObjectIndex(FI))
193 // This stack reference was produced by instruction selection and
195 if (FI < LowSpillSlot)
197 assert((unsigned)FI-LowSpillSlot < SpillSlotToUsesMap.size()
198 && "Invalid spill slot");
199 SpillSlotToUsesMap[FI-LowSpillSlot].erase(MI);
201 MI2VirtMap.erase(MI);
202 SpillPt2VirtMap.erase(MI);
203 RestorePt2VirtMap.erase(MI);
204 EmergencySpillMap.erase(MI);
207 void VirtRegMap::print(std::ostream &OS) const {
208 const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();
210 OS << "********** REGISTER MAP **********\n";
211 for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
212 e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
213 if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
214 OS << "[reg" << i << " -> " << TRI->getName(Virt2PhysMap[i])
218 for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
219 e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
220 if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
221 OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
225 void VirtRegMap::dump() const {
230 //===----------------------------------------------------------------------===//
231 // Simple Spiller Implementation
232 //===----------------------------------------------------------------------===//
234 Spiller::~Spiller() {}
237 struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller {
238 bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM);
242 bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
243 DOUT << "********** REWRITE MACHINE CODE **********\n";
244 DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
245 const TargetMachine &TM = MF.getTarget();
246 const TargetInstrInfo &TII = *TM.getInstrInfo();
247 const TargetRegisterInfo &TRI = *TM.getRegisterInfo();
250 // LoadedRegs - Keep track of which vregs are loaded, so that we only load
251 // each vreg once (in the case where a spilled vreg is used by multiple
252 // operands). This is always smaller than the number of operands to the
253 // current machine instr, so it should be small.
254 std::vector<unsigned> LoadedRegs;
256 for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
258 DOUT << MBBI->getBasicBlock()->getName() << ":\n";
259 MachineBasicBlock &MBB = *MBBI;
260 for (MachineBasicBlock::iterator MII = MBB.begin(),
261 E = MBB.end(); MII != E; ++MII) {
262 MachineInstr &MI = *MII;
263 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
264 MachineOperand &MO = MI.getOperand(i);
265 if (MO.isReg() && MO.getReg()) {
266 if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
267 unsigned VirtReg = MO.getReg();
268 unsigned SubIdx = MO.getSubReg();
269 unsigned PhysReg = VRM.getPhys(VirtReg);
270 unsigned RReg = SubIdx ? TRI.getSubReg(PhysReg, SubIdx) : PhysReg;
271 if (!VRM.isAssignedReg(VirtReg)) {
272 int StackSlot = VRM.getStackSlot(VirtReg);
273 const TargetRegisterClass* RC =
274 MF.getRegInfo().getRegClass(VirtReg);
277 std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
278 == LoadedRegs.end()) {
279 TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
280 MachineInstr *LoadMI = prior(MII);
281 VRM.addSpillSlotUse(StackSlot, LoadMI);
282 LoadedRegs.push_back(VirtReg);
284 DOUT << '\t' << *LoadMI;
288 TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
290 MachineInstr *StoreMI = next(MII);
291 VRM.addSpillSlotUse(StackSlot, StoreMI);
295 MF.getRegInfo().setPhysRegUsed(RReg);
296 MI.getOperand(i).setReg(RReg);
298 MF.getRegInfo().setPhysRegUsed(MO.getReg());
310 //===----------------------------------------------------------------------===//
311 // Local Spiller Implementation
312 //===----------------------------------------------------------------------===//
314 /// AvailableSpills - As the local spiller is scanning and rewriting an MBB from
315 /// top down, keep track of which spills slots or remat are available in each
318 /// Note that not all physregs are created equal here. In particular, some
319 /// physregs are reloads that we are allowed to clobber or ignore at any time.
320 /// Other physregs are values that the register allocated program is using that
321 /// we cannot CHANGE, but we can read if we like. We keep track of this on a
322 /// per-stack-slot / remat id basis as the low bit in the value of the
323 /// SpillSlotsAvailable entries. The predicate 'canClobberPhysReg()' checks
324 /// this bit and addAvailable sets it if.
326 class VISIBILITY_HIDDEN AvailableSpills {
327 const TargetRegisterInfo *TRI;
328 const TargetInstrInfo *TII;
330 // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
331 // or remat'ed virtual register values that are still available, due to being
332 // loaded or stored to, but not invalidated yet.
333 std::map<int, unsigned> SpillSlotsOrReMatsAvailable;
335 // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
336 // indicating which stack slot values are currently held by a physreg. This
337 // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
338 // physreg is modified.
339 std::multimap<unsigned, int> PhysRegsAvailable;
341 void disallowClobberPhysRegOnly(unsigned PhysReg);
343 void ClobberPhysRegOnly(unsigned PhysReg);
345 AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
346 : TRI(tri), TII(tii) {
349 /// clear - Reset the state.
351 SpillSlotsOrReMatsAvailable.clear();
352 PhysRegsAvailable.clear();
355 const TargetRegisterInfo *getRegInfo() const { return TRI; }
357 /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
358 /// available in a physical register, return that PhysReg, otherwise
360 unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
361 std::map<int, unsigned>::const_iterator I =
362 SpillSlotsOrReMatsAvailable.find(Slot);
363 if (I != SpillSlotsOrReMatsAvailable.end()) {
364 return I->second >> 1; // Remove the CanClobber bit.
369 /// addAvailable - Mark that the specified stack slot / remat is available in
370 /// the specified physreg. If CanClobber is true, the physreg can be modified
371 /// at any time without changing the semantics of the program.
372 void addAvailable(int SlotOrReMat, unsigned Reg, bool CanClobber = true) {
373 // If this stack slot is thought to be available in some other physreg,
374 // remove its record.
375 ModifyStackSlotOrReMat(SlotOrReMat);
377 PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
378 SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) | (unsigned)CanClobber;
380 if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
381 DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
383 DOUT << "Remembering SS#" << SlotOrReMat;
384 DOUT << " in physreg " << TRI->getName(Reg) << "\n";
387 /// canClobberPhysReg - Return true if the spiller is allowed to change the
388 /// value of the specified stackslot register if it desires. The specified
389 /// stack slot must be available in a physreg for this query to make sense.
390 bool canClobberPhysReg(int SlotOrReMat) const {
391 assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
392 "Value not available!");
393 return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
396 /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
397 /// stackslot register. The register is still available but is no longer
398 /// allowed to be modifed.
399 void disallowClobberPhysReg(unsigned PhysReg);
401 /// ClobberPhysReg - This is called when the specified physreg changes
402 /// value. We use this to invalidate any info about stuff that lives in
403 /// it and any of its aliases.
404 void ClobberPhysReg(unsigned PhysReg);
406 /// ModifyStackSlotOrReMat - This method is called when the value in a stack
407 /// slot changes. This removes information about which register the previous
408 /// value for this slot lives in (as the previous value is dead now).
409 void ModifyStackSlotOrReMat(int SlotOrReMat);
411 /// AddAvailableRegsToLiveIn - Availability information is being kept coming
412 /// into the specified MBB. Add available physical registers as potential
413 /// live-in's. If they are reused in the MBB, they will be added to the
414 /// live-in set to make register scavenger and post-allocation scheduler.
415 void AddAvailableRegsToLiveIn(MachineBasicBlock &MBB, BitVector &RegKills,
416 std::vector<MachineOperand*> &KillOps);
420 /// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
421 /// stackslot register. The register is still available but is no longer
422 /// allowed to be modifed.
423 void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
424 std::multimap<unsigned, int>::iterator I =
425 PhysRegsAvailable.lower_bound(PhysReg);
426 while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
427 int SlotOrReMat = I->second;
429 assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
430 "Bidirectional map mismatch!");
431 SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
432 DOUT << "PhysReg " << TRI->getName(PhysReg)
433 << " copied, it is available for use but can no longer be modified\n";
437 /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
438 /// stackslot register and its aliases. The register and its aliases may
439 /// still available but is no longer allowed to be modifed.
440 void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
441 for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
442 disallowClobberPhysRegOnly(*AS);
443 disallowClobberPhysRegOnly(PhysReg);
446 /// ClobberPhysRegOnly - This is called when the specified physreg changes
447 /// value. We use this to invalidate any info about stuff we thing lives in it.
448 void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
449 std::multimap<unsigned, int>::iterator I =
450 PhysRegsAvailable.lower_bound(PhysReg);
451 while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
452 int SlotOrReMat = I->second;
453 PhysRegsAvailable.erase(I++);
454 assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
455 "Bidirectional map mismatch!");
456 SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
457 DOUT << "PhysReg " << TRI->getName(PhysReg)
458 << " clobbered, invalidating ";
459 if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
460 DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
462 DOUT << "SS#" << SlotOrReMat << "\n";
466 /// ClobberPhysReg - This is called when the specified physreg changes
467 /// value. We use this to invalidate any info about stuff we thing lives in
468 /// it and any of its aliases.
469 void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
470 for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
471 ClobberPhysRegOnly(*AS);
472 ClobberPhysRegOnly(PhysReg);
475 /// ModifyStackSlotOrReMat - This method is called when the value in a stack
476 /// slot changes. This removes information about which register the previous
477 /// value for this slot lives in (as the previous value is dead now).
478 void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
479 std::map<int, unsigned>::iterator It =
480 SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
481 if (It == SpillSlotsOrReMatsAvailable.end()) return;
482 unsigned Reg = It->second >> 1;
483 SpillSlotsOrReMatsAvailable.erase(It);
485 // This register may hold the value of multiple stack slots, only remove this
486 // stack slot from the set of values the register contains.
487 std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
489 assert(I != PhysRegsAvailable.end() && I->first == Reg &&
490 "Map inverse broken!");
491 if (I->second == SlotOrReMat) break;
493 PhysRegsAvailable.erase(I);
496 /// InvalidateKill - A MI that defines the specified register is being deleted,
497 /// invalidate the register kill information.
498 static void InvalidateKill(unsigned Reg, BitVector &RegKills,
499 std::vector<MachineOperand*> &KillOps) {
501 KillOps[Reg]->setIsKill(false);
507 /// AddAvailableRegsToLiveIn - Availability information is being kept coming
508 /// into the specified MBB. Add available physical registers as potential
509 /// live-in's. If they are reused in the MBB, they will be added to the
510 /// live-in set to make register scavenger and post-allocation scheduler.
511 void AvailableSpills::AddAvailableRegsToLiveIn(MachineBasicBlock &MBB,
513 std::vector<MachineOperand*> &KillOps) {
514 std::set<unsigned> NotAvailable;
515 for (std::multimap<unsigned, int>::iterator
516 I = PhysRegsAvailable.begin(), E = PhysRegsAvailable.end();
518 unsigned Reg = I->first;
519 const TargetRegisterClass* RC = TRI->getPhysicalRegisterRegClass(Reg);
520 // FIXME: A temporary workaround. We can't reuse available value if it's
521 // not safe to move the def of the virtual register's class. e.g.
522 // X86::RFP* register classes. Do not add it as a live-in.
523 if (!TII->isSafeToMoveRegClassDefs(RC))
524 // This is no longer available.
525 NotAvailable.insert(Reg);
528 InvalidateKill(Reg, RegKills, KillOps);
531 // Skip over the same register.
532 std::multimap<unsigned, int>::iterator NI = next(I);
533 while (NI != E && NI->first == Reg) {
539 for (std::set<unsigned>::iterator I = NotAvailable.begin(),
540 E = NotAvailable.end(); I != E; ++I) {
542 for (const unsigned *SubRegs = TRI->getSubRegisters(*I);
544 ClobberPhysReg(*SubRegs);
548 /// findSinglePredSuccessor - Return via reference a vector of machine basic
549 /// blocks each of which is a successor of the specified BB and has no other
551 static void findSinglePredSuccessor(MachineBasicBlock *MBB,
552 SmallVectorImpl<MachineBasicBlock *> &Succs) {
553 for (MachineBasicBlock::succ_iterator SI = MBB->succ_begin(),
554 SE = MBB->succ_end(); SI != SE; ++SI) {
555 MachineBasicBlock *SuccMBB = *SI;
556 if (SuccMBB->pred_size() == 1)
557 Succs.push_back(SuccMBB);
562 /// LocalSpiller - This spiller does a simple pass over the machine basic
563 /// block to attempt to keep spills in registers as much as possible for
564 /// blocks that have low register pressure (the vreg may be spilled due to
565 /// register pressure in other blocks).
566 class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
567 MachineRegisterInfo *RegInfo;
568 const TargetRegisterInfo *TRI;
569 const TargetInstrInfo *TII;
570 DenseMap<MachineInstr*, unsigned> DistanceMap;
572 bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
573 RegInfo = &MF.getRegInfo();
574 TRI = MF.getTarget().getRegisterInfo();
575 TII = MF.getTarget().getInstrInfo();
576 DOUT << "\n**** Local spiller rewriting function '"
577 << MF.getFunction()->getName() << "':\n";
578 DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
582 // Spills - Keep track of which spilled values are available in physregs
583 // so that we can choose to reuse the physregs instead of emitting
584 // reloads. This is usually refreshed per basic block.
585 AvailableSpills Spills(TRI, TII);
587 // Keep track of kill information.
588 BitVector RegKills(TRI->getNumRegs());
589 std::vector<MachineOperand*> KillOps;
590 KillOps.resize(TRI->getNumRegs(), NULL);
592 // SingleEntrySuccs - Successor blocks which have a single predecessor.
593 SmallVector<MachineBasicBlock*, 4> SinglePredSuccs;
594 SmallPtrSet<MachineBasicBlock*,16> EarlyVisited;
596 // Traverse the basic blocks depth first.
597 MachineBasicBlock *Entry = MF.begin();
598 SmallPtrSet<MachineBasicBlock*,16> Visited;
599 for (df_ext_iterator<MachineBasicBlock*,
600 SmallPtrSet<MachineBasicBlock*,16> >
601 DFI = df_ext_begin(Entry, Visited), E = df_ext_end(Entry, Visited);
603 MachineBasicBlock *MBB = *DFI;
604 if (!EarlyVisited.count(MBB))
605 RewriteMBB(*MBB, VRM, Spills, RegKills, KillOps);
607 // If this MBB is the only predecessor of a successor. Keep the
608 // availability information and visit it next.
610 // Keep visiting single predecessor successor as long as possible.
611 SinglePredSuccs.clear();
612 findSinglePredSuccessor(MBB, SinglePredSuccs);
613 if (SinglePredSuccs.empty())
616 // FIXME: More than one successors, each of which has MBB has
617 // the only predecessor.
618 MBB = SinglePredSuccs[0];
619 if (!Visited.count(MBB) && EarlyVisited.insert(MBB)) {
620 Spills.AddAvailableRegsToLiveIn(*MBB, RegKills, KillOps);
621 RewriteMBB(*MBB, VRM, Spills, RegKills, KillOps);
626 // Clear the availability info.
630 DOUT << "**** Post Machine Instrs ****\n";
633 // Mark unused spill slots.
634 MachineFrameInfo *MFI = MF.getFrameInfo();
635 int SS = VRM.getLowSpillSlot();
636 if (SS != VirtRegMap::NO_STACK_SLOT)
637 for (int e = VRM.getHighSpillSlot(); SS <= e; ++SS)
638 if (!VRM.isSpillSlotUsed(SS)) {
639 MFI->RemoveStackObject(SS);
646 void TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
647 unsigned Reg, BitVector &RegKills,
648 std::vector<MachineOperand*> &KillOps);
649 bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
650 MachineBasicBlock::iterator &MII,
651 std::vector<MachineInstr*> &MaybeDeadStores,
652 AvailableSpills &Spills, BitVector &RegKills,
653 std::vector<MachineOperand*> &KillOps,
655 bool CommuteToFoldReload(MachineBasicBlock &MBB,
656 MachineBasicBlock::iterator &MII,
657 unsigned VirtReg, unsigned SrcReg, int SS,
658 AvailableSpills &Spills,
660 std::vector<MachineOperand*> &KillOps,
661 const TargetRegisterInfo *TRI,
663 void SpillRegToStackSlot(MachineBasicBlock &MBB,
664 MachineBasicBlock::iterator &MII,
665 int Idx, unsigned PhysReg, int StackSlot,
666 const TargetRegisterClass *RC,
667 bool isAvailable, MachineInstr *&LastStore,
668 AvailableSpills &Spills,
669 SmallSet<MachineInstr*, 4> &ReMatDefs,
671 std::vector<MachineOperand*> &KillOps,
673 void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
674 AvailableSpills &Spills,
675 BitVector &RegKills, std::vector<MachineOperand*> &KillOps);
679 /// InvalidateKills - MI is going to be deleted. If any of its operands are
680 /// marked kill, then invalidate the information.
681 static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
682 std::vector<MachineOperand*> &KillOps,
683 SmallVector<unsigned, 2> *KillRegs = NULL) {
684 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
685 MachineOperand &MO = MI.getOperand(i);
686 if (!MO.isReg() || !MO.isUse() || !MO.isKill())
688 unsigned Reg = MO.getReg();
689 if (TargetRegisterInfo::isVirtualRegister(Reg))
692 KillRegs->push_back(Reg);
693 assert(Reg < KillOps.size());
694 if (KillOps[Reg] == &MO) {
701 /// InvalidateRegDef - If the def operand of the specified def MI is now dead
702 /// (since it's spill instruction is removed), mark it isDead. Also checks if
703 /// the def MI has other definition operands that are not dead. Returns it by
705 static bool InvalidateRegDef(MachineBasicBlock::iterator I,
706 MachineInstr &NewDef, unsigned Reg,
708 // Due to remat, it's possible this reg isn't being reused. That is,
709 // the def of this reg (by prev MI) is now dead.
710 MachineInstr *DefMI = I;
711 MachineOperand *DefOp = NULL;
712 for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
713 MachineOperand &MO = DefMI->getOperand(i);
714 if (MO.isReg() && MO.isDef()) {
715 if (MO.getReg() == Reg)
717 else if (!MO.isDead())
724 bool FoundUse = false, Done = false;
725 MachineBasicBlock::iterator E = &NewDef;
727 for (; !Done && I != E; ++I) {
728 MachineInstr *NMI = I;
729 for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
730 MachineOperand &MO = NMI->getOperand(j);
731 if (!MO.isReg() || MO.getReg() != Reg)
735 Done = true; // Stop after scanning all the operands of this MI.
746 /// UpdateKills - Track and update kill info. If a MI reads a register that is
747 /// marked kill, then it must be due to register reuse. Transfer the kill info
749 static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
750 std::vector<MachineOperand*> &KillOps,
751 const TargetRegisterInfo* TRI) {
752 const TargetInstrDesc &TID = MI.getDesc();
753 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
754 MachineOperand &MO = MI.getOperand(i);
755 if (!MO.isReg() || !MO.isUse())
757 unsigned Reg = MO.getReg();
761 if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
762 // That can't be right. Register is killed but not re-defined and it's
763 // being reused. Let's fix that.
764 KillOps[Reg]->setIsKill(false);
767 if (i < TID.getNumOperands() &&
768 TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
769 // Unless it's a two-address operand, this is the new kill.
778 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
779 const MachineOperand &MO = MI.getOperand(i);
780 if (!MO.isReg() || !MO.isDef())
782 unsigned Reg = MO.getReg();
785 // It also defines (or partially define) aliases.
786 for (const unsigned *AS = TRI->getAliasSet(Reg); *AS; ++AS) {
793 /// ReMaterialize - Re-materialize definition for Reg targetting DestReg.
795 static void ReMaterialize(MachineBasicBlock &MBB,
796 MachineBasicBlock::iterator &MII,
797 unsigned DestReg, unsigned Reg,
798 const TargetInstrInfo *TII,
799 const TargetRegisterInfo *TRI,
801 TII->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
802 MachineInstr *NewMI = prior(MII);
803 for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
804 MachineOperand &MO = NewMI->getOperand(i);
805 if (!MO.isReg() || MO.getReg() == 0)
807 unsigned VirtReg = MO.getReg();
808 if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
811 unsigned SubIdx = MO.getSubReg();
812 unsigned Phys = VRM.getPhys(VirtReg);
814 unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
821 // ReusedOp - For each reused operand, we keep track of a bit of information, in
822 // case we need to rollback upon processing a new operand. See comments below.
825 // The MachineInstr operand that reused an available value.
828 // StackSlotOrReMat - The spill slot or remat id of the value being reused.
829 unsigned StackSlotOrReMat;
831 // PhysRegReused - The physical register the value was available in.
832 unsigned PhysRegReused;
834 // AssignedPhysReg - The physreg that was assigned for use by the reload.
835 unsigned AssignedPhysReg;
837 // VirtReg - The virtual register itself.
840 ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
842 : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
843 AssignedPhysReg(apr), VirtReg(vreg) {}
846 /// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
847 /// is reused instead of reloaded.
848 class VISIBILITY_HIDDEN ReuseInfo {
850 std::vector<ReusedOp> Reuses;
851 BitVector PhysRegsClobbered;
853 ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
854 PhysRegsClobbered.resize(tri->getNumRegs());
857 bool hasReuses() const {
858 return !Reuses.empty();
861 /// addReuse - If we choose to reuse a virtual register that is already
862 /// available instead of reloading it, remember that we did so.
863 void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
864 unsigned PhysRegReused, unsigned AssignedPhysReg,
866 // If the reload is to the assigned register anyway, no undo will be
868 if (PhysRegReused == AssignedPhysReg) return;
870 // Otherwise, remember this.
871 Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
872 AssignedPhysReg, VirtReg));
875 void markClobbered(unsigned PhysReg) {
876 PhysRegsClobbered.set(PhysReg);
879 bool isClobbered(unsigned PhysReg) const {
880 return PhysRegsClobbered.test(PhysReg);
883 /// GetRegForReload - We are about to emit a reload into PhysReg. If there
884 /// is some other operand that is using the specified register, either pick
885 /// a new register to use, or evict the previous reload and use this reg.
886 unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
887 AvailableSpills &Spills,
888 std::vector<MachineInstr*> &MaybeDeadStores,
889 SmallSet<unsigned, 8> &Rejected,
891 std::vector<MachineOperand*> &KillOps,
893 const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
896 if (Reuses.empty()) return PhysReg; // This is most often empty.
898 for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
899 ReusedOp &Op = Reuses[ro];
900 // If we find some other reuse that was supposed to use this register
901 // exactly for its reload, we can change this reload to use ITS reload
902 // register. That is, unless its reload register has already been
903 // considered and subsequently rejected because it has also been reused
904 // by another operand.
905 if (Op.PhysRegReused == PhysReg &&
906 Rejected.count(Op.AssignedPhysReg) == 0) {
907 // Yup, use the reload register that we didn't use before.
908 unsigned NewReg = Op.AssignedPhysReg;
909 Rejected.insert(PhysReg);
910 return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
911 RegKills, KillOps, VRM);
913 // Otherwise, we might also have a problem if a previously reused
914 // value aliases the new register. If so, codegen the previous reload
916 unsigned PRRU = Op.PhysRegReused;
917 const TargetRegisterInfo *TRI = Spills.getRegInfo();
918 if (TRI->areAliases(PRRU, PhysReg)) {
919 // Okay, we found out that an alias of a reused register
920 // was used. This isn't good because it means we have
921 // to undo a previous reuse.
922 MachineBasicBlock *MBB = MI->getParent();
923 const TargetRegisterClass *AliasRC =
924 MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);
926 // Copy Op out of the vector and remove it, we're going to insert an
927 // explicit load for it.
929 Reuses.erase(Reuses.begin()+ro);
931 // Ok, we're going to try to reload the assigned physreg into the
932 // slot that we were supposed to in the first place. However, that
933 // register could hold a reuse. Check to see if it conflicts or
934 // would prefer us to use a different register.
935 unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
936 MI, Spills, MaybeDeadStores,
937 Rejected, RegKills, KillOps, VRM);
939 MachineBasicBlock::iterator MII = MI;
940 if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
941 ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TII, TRI,VRM);
943 TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
944 NewOp.StackSlotOrReMat, AliasRC);
945 MachineInstr *LoadMI = prior(MII);
946 VRM.addSpillSlotUse(NewOp.StackSlotOrReMat, LoadMI);
947 // Any stores to this stack slot are not dead anymore.
948 MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
951 Spills.ClobberPhysReg(NewPhysReg);
952 Spills.ClobberPhysReg(NewOp.PhysRegReused);
954 unsigned SubIdx = MI->getOperand(NewOp.Operand).getSubReg();
955 unsigned RReg = SubIdx ? TRI->getSubReg(NewPhysReg, SubIdx) : NewPhysReg;
956 MI->getOperand(NewOp.Operand).setReg(RReg);
958 Spills.addAvailable(NewOp.StackSlotOrReMat, NewPhysReg);
960 UpdateKills(*MII, RegKills, KillOps, TRI);
961 DOUT << '\t' << *MII;
963 DOUT << "Reuse undone!\n";
966 // Finally, PhysReg is now available, go ahead and use it.
974 /// GetRegForReload - Helper for the above GetRegForReload(). Add a
975 /// 'Rejected' set to remember which registers have been considered and
976 /// rejected for the reload. This avoids infinite looping in case like
979 /// t2 <- assigned r0 for use by the reload but ended up reuse r1
980 /// t3 <- assigned r1 for use by the reload but ended up reuse r0
982 /// sees r1 is taken by t2, tries t2's reload register r0
983 /// sees r0 is taken by t3, tries t3's reload register r1
984 /// sees r1 is taken by t2, tries t2's reload register r0 ...
985 unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
986 AvailableSpills &Spills,
987 std::vector<MachineInstr*> &MaybeDeadStores,
989 std::vector<MachineOperand*> &KillOps,
991 SmallSet<unsigned, 8> Rejected;
992 return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
993 RegKills, KillOps, VRM);
998 /// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
999 /// instruction. e.g.
1001 /// movl %eax, -32(%ebp)
1002 /// movl -36(%ebp), %eax
1003 /// orl %eax, -32(%ebp)
1006 /// orl -36(%ebp), %eax
1007 /// mov %eax, -32(%ebp)
1008 /// This enables unfolding optimization for a subsequent instruction which will
1009 /// also eliminate the newly introduced store instruction.
1010 bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
1011 MachineBasicBlock::iterator &MII,
1012 std::vector<MachineInstr*> &MaybeDeadStores,
1013 AvailableSpills &Spills,
1014 BitVector &RegKills,
1015 std::vector<MachineOperand*> &KillOps,
1017 MachineFunction &MF = *MBB.getParent();
1018 MachineInstr &MI = *MII;
1019 unsigned UnfoldedOpc = 0;
1020 unsigned UnfoldPR = 0;
1021 unsigned UnfoldVR = 0;
1022 int FoldedSS = VirtRegMap::NO_STACK_SLOT;
1023 VirtRegMap::MI2VirtMapTy::const_iterator I, End;
1024 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
1025 // Only transform a MI that folds a single register.
1028 UnfoldVR = I->second.first;
1029 VirtRegMap::ModRef MR = I->second.second;
1030 // MI2VirtMap be can updated which invalidate the iterator.
1031 // Increment the iterator first.
1033 if (VRM.isAssignedReg(UnfoldVR))
1035 // If this reference is not a use, any previous store is now dead.
1036 // Otherwise, the store to this stack slot is not dead anymore.
1037 FoldedSS = VRM.getStackSlot(UnfoldVR);
1038 MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
1039 if (DeadStore && (MR & VirtRegMap::isModRef)) {
1040 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
1041 if (!PhysReg || !DeadStore->readsRegister(PhysReg))
1044 UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
1052 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1053 MachineOperand &MO = MI.getOperand(i);
1054 if (!MO.isReg() || MO.getReg() == 0 || !MO.isUse())
1056 unsigned VirtReg = MO.getReg();
1057 if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
1059 if (VRM.isAssignedReg(VirtReg)) {
1060 unsigned PhysReg = VRM.getPhys(VirtReg);
1061 if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
1063 } else if (VRM.isReMaterialized(VirtReg))
1065 int SS = VRM.getStackSlot(VirtReg);
1066 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
1068 if (TRI->regsOverlap(PhysReg, UnfoldPR))
1072 if (VRM.hasPhys(VirtReg)) {
1073 PhysReg = VRM.getPhys(VirtReg);
1074 if (!TRI->regsOverlap(PhysReg, UnfoldPR))
1078 // Ok, we'll need to reload the value into a register which makes
1079 // it impossible to perform the store unfolding optimization later.
1080 // Let's see if it is possible to fold the load if the store is
1081 // unfolded. This allows us to perform the store unfolding
1083 SmallVector<MachineInstr*, 4> NewMIs;
1084 if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
1085 assert(NewMIs.size() == 1);
1086 MachineInstr *NewMI = NewMIs.back();
1088 int Idx = NewMI->findRegisterUseOperandIdx(VirtReg, false);
1090 SmallVector<unsigned, 1> Ops;
1092 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
1094 VRM.addSpillSlotUse(SS, FoldedMI);
1095 if (!VRM.hasPhys(UnfoldVR))
1096 VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
1097 VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
1098 MII = MBB.insert(MII, FoldedMI);
1099 InvalidateKills(MI, RegKills, KillOps);
1100 VRM.RemoveMachineInstrFromMaps(&MI);
1102 MF.DeleteMachineInstr(NewMI);
1105 MF.DeleteMachineInstr(NewMI);
1111 /// CommuteToFoldReload -
1114 /// r1 = op r1, r2<kill>
1117 /// If op is commutable and r2 is killed, then we can xform these to
1118 /// r2 = op r2, fi#1
1120 bool LocalSpiller::CommuteToFoldReload(MachineBasicBlock &MBB,
1121 MachineBasicBlock::iterator &MII,
1122 unsigned VirtReg, unsigned SrcReg, int SS,
1123 AvailableSpills &Spills,
1124 BitVector &RegKills,
1125 std::vector<MachineOperand*> &KillOps,
1126 const TargetRegisterInfo *TRI,
1128 if (MII == MBB.begin() || !MII->killsRegister(SrcReg))
1131 MachineFunction &MF = *MBB.getParent();
1132 MachineInstr &MI = *MII;
1133 MachineBasicBlock::iterator DefMII = prior(MII);
1134 MachineInstr *DefMI = DefMII;
1135 const TargetInstrDesc &TID = DefMI->getDesc();
1137 if (DefMII != MBB.begin() &&
1138 TID.isCommutable() &&
1139 TII->CommuteChangesDestination(DefMI, NewDstIdx)) {
1140 MachineOperand &NewDstMO = DefMI->getOperand(NewDstIdx);
1141 unsigned NewReg = NewDstMO.getReg();
1142 if (!NewDstMO.isKill() || TRI->regsOverlap(NewReg, SrcReg))
1144 MachineInstr *ReloadMI = prior(DefMII);
1146 unsigned DestReg = TII->isLoadFromStackSlot(ReloadMI, FrameIdx);
1147 if (DestReg != SrcReg || FrameIdx != SS)
1149 int UseIdx = DefMI->findRegisterUseOperandIdx(DestReg, false);
1152 int DefIdx = TID.getOperandConstraint(UseIdx, TOI::TIED_TO);
1155 assert(DefMI->getOperand(DefIdx).isReg() &&
1156 DefMI->getOperand(DefIdx).getReg() == SrcReg);
1158 // Now commute def instruction.
1159 MachineInstr *CommutedMI = TII->commuteInstruction(DefMI, true);
1162 SmallVector<unsigned, 1> Ops;
1163 Ops.push_back(NewDstIdx);
1164 MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, CommutedMI, Ops, SS);
1165 // Not needed since foldMemoryOperand returns new MI.
1166 MF.DeleteMachineInstr(CommutedMI);
1170 VRM.addSpillSlotUse(SS, FoldedMI);
1171 VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
1172 // Insert new def MI and spill MI.
1173 const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(VirtReg);
1174 TII->storeRegToStackSlot(MBB, &MI, NewReg, true, SS, RC);
1176 MachineInstr *StoreMI = MII;
1177 VRM.addSpillSlotUse(SS, StoreMI);
1178 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1179 MII = MBB.insert(MII, FoldedMI); // Update MII to backtrack.
1181 // Delete all 3 old instructions.
1182 InvalidateKills(*ReloadMI, RegKills, KillOps);
1183 VRM.RemoveMachineInstrFromMaps(ReloadMI);
1184 MBB.erase(ReloadMI);
1185 InvalidateKills(*DefMI, RegKills, KillOps);
1186 VRM.RemoveMachineInstrFromMaps(DefMI);
1188 InvalidateKills(MI, RegKills, KillOps);
1189 VRM.RemoveMachineInstrFromMaps(&MI);
1192 // If NewReg was previously holding value of some SS, it's now clobbered.
1193 // This has to be done now because it's a physical register. When this
1194 // instruction is re-visited, it's ignored.
1195 Spills.ClobberPhysReg(NewReg);
1204 /// findSuperReg - Find the SubReg's super-register of given register class
1205 /// where its SubIdx sub-register is SubReg.
1206 static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
1207 unsigned SubIdx, const TargetRegisterInfo *TRI) {
1208 for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
1211 if (TRI->getSubReg(Reg, SubIdx) == SubReg)
1217 /// SpillRegToStackSlot - Spill a register to a specified stack slot. Check if
1218 /// the last store to the same slot is now dead. If so, remove the last store.
1219 void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
1220 MachineBasicBlock::iterator &MII,
1221 int Idx, unsigned PhysReg, int StackSlot,
1222 const TargetRegisterClass *RC,
1223 bool isAvailable, MachineInstr *&LastStore,
1224 AvailableSpills &Spills,
1225 SmallSet<MachineInstr*, 4> &ReMatDefs,
1226 BitVector &RegKills,
1227 std::vector<MachineOperand*> &KillOps,
1229 TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
1230 MachineInstr *StoreMI = next(MII);
1231 VRM.addSpillSlotUse(StackSlot, StoreMI);
1232 DOUT << "Store:\t" << *StoreMI;
1234 // If there is a dead store to this stack slot, nuke it now.
1236 DOUT << "Removed dead store:\t" << *LastStore;
1238 SmallVector<unsigned, 2> KillRegs;
1239 InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
1240 MachineBasicBlock::iterator PrevMII = LastStore;
1241 bool CheckDef = PrevMII != MBB.begin();
1244 VRM.RemoveMachineInstrFromMaps(LastStore);
1245 MBB.erase(LastStore);
1247 // Look at defs of killed registers on the store. Mark the defs
1248 // as dead since the store has been deleted and they aren't
1250 for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
1251 bool HasOtherDef = false;
1252 if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
1253 MachineInstr *DeadDef = PrevMII;
1254 if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
1255 // FIXME: This assumes a remat def does not have side
1257 VRM.RemoveMachineInstrFromMaps(DeadDef);
1266 LastStore = next(MII);
1268 // If the stack slot value was previously available in some other
1269 // register, change it now. Otherwise, make the register available,
1271 Spills.ModifyStackSlotOrReMat(StackSlot);
1272 Spills.ClobberPhysReg(PhysReg);
1273 Spills.addAvailable(StackSlot, PhysReg, isAvailable);
1277 /// TransferDeadness - A identity copy definition is dead and it's being
1278 /// removed. Find the last def or use and mark it as dead / kill.
1279 void LocalSpiller::TransferDeadness(MachineBasicBlock *MBB, unsigned CurDist,
1280 unsigned Reg, BitVector &RegKills,
1281 std::vector<MachineOperand*> &KillOps) {
1282 int LastUDDist = -1;
1283 MachineInstr *LastUDMI = NULL;
1284 for (MachineRegisterInfo::reg_iterator RI = RegInfo->reg_begin(Reg),
1285 RE = RegInfo->reg_end(); RI != RE; ++RI) {
1286 MachineInstr *UDMI = &*RI;
1287 if (UDMI->getParent() != MBB)
1289 DenseMap<MachineInstr*, unsigned>::iterator DI = DistanceMap.find(UDMI);
1290 if (DI == DistanceMap.end() || DI->second > CurDist)
1292 if ((int)DI->second < LastUDDist)
1294 LastUDDist = DI->second;
1299 const TargetInstrDesc &TID = LastUDMI->getDesc();
1300 MachineOperand *LastUD = NULL;
1301 for (unsigned i = 0, e = LastUDMI->getNumOperands(); i != e; ++i) {
1302 MachineOperand &MO = LastUDMI->getOperand(i);
1303 if (!MO.isReg() || MO.getReg() != Reg)
1305 if (!LastUD || (LastUD->isUse() && MO.isDef()))
1307 if (TID.getOperandConstraint(i, TOI::TIED_TO) != -1)
1310 if (LastUD->isDef())
1311 LastUD->setIsDead();
1313 LastUD->setIsKill();
1315 KillOps[Reg] = LastUD;
1320 /// rewriteMBB - Keep track of which spills are available even after the
1321 /// register allocator is done with them. If possible, avid reloading vregs.
1322 void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM,
1323 AvailableSpills &Spills, BitVector &RegKills,
1324 std::vector<MachineOperand*> &KillOps) {
1325 DOUT << "\n**** Local spiller rewriting MBB '"
1326 << MBB.getBasicBlock()->getName() << ":\n";
1328 MachineFunction &MF = *MBB.getParent();
1330 // MaybeDeadStores - When we need to write a value back into a stack slot,
1331 // keep track of the inserted store. If the stack slot value is never read
1332 // (because the value was used from some available register, for example), and
1333 // subsequently stored to, the original store is dead. This map keeps track
1334 // of inserted stores that are not used. If we see a subsequent store to the
1335 // same stack slot, the original store is deleted.
1336 std::vector<MachineInstr*> MaybeDeadStores;
1337 MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);
1339 // ReMatDefs - These are rematerializable def MIs which are not deleted.
1340 SmallSet<MachineInstr*, 4> ReMatDefs;
1345 KillOps.resize(TRI->getNumRegs(), NULL);
1348 DistanceMap.clear();
1349 for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
1351 MachineBasicBlock::iterator NextMII = MII; ++NextMII;
1353 VirtRegMap::MI2VirtMapTy::const_iterator I, End;
1354 bool Erased = false;
1355 bool BackTracked = false;
1356 if (PrepForUnfoldOpti(MBB, MII,
1357 MaybeDeadStores, Spills, RegKills, KillOps, VRM))
1358 NextMII = next(MII);
1360 MachineInstr &MI = *MII;
1361 const TargetInstrDesc &TID = MI.getDesc();
1363 if (VRM.hasEmergencySpills(&MI)) {
1364 // Spill physical register(s) in the rare case the allocator has run out
1365 // of registers to allocate.
1366 SmallSet<int, 4> UsedSS;
1367 std::vector<unsigned> &EmSpills = VRM.getEmergencySpills(&MI);
1368 for (unsigned i = 0, e = EmSpills.size(); i != e; ++i) {
1369 unsigned PhysReg = EmSpills[i];
1370 const TargetRegisterClass *RC =
1371 TRI->getPhysicalRegisterRegClass(PhysReg);
1372 assert(RC && "Unable to determine register class!");
1373 int SS = VRM.getEmergencySpillSlot(RC);
1374 if (UsedSS.count(SS))
1375 assert(0 && "Need to spill more than one physical registers!");
1377 TII->storeRegToStackSlot(MBB, MII, PhysReg, true, SS, RC);
1378 MachineInstr *StoreMI = prior(MII);
1379 VRM.addSpillSlotUse(SS, StoreMI);
1380 TII->loadRegFromStackSlot(MBB, next(MII), PhysReg, SS, RC);
1381 MachineInstr *LoadMI = next(MII);
1382 VRM.addSpillSlotUse(SS, LoadMI);
1385 NextMII = next(MII);
1388 // Insert restores here if asked to.
1389 if (VRM.isRestorePt(&MI)) {
1390 std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
1391 for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
1392 unsigned VirtReg = RestoreRegs[e-i-1]; // Reverse order.
1393 if (!VRM.getPreSplitReg(VirtReg))
1394 continue; // Split interval spilled again.
1395 unsigned Phys = VRM.getPhys(VirtReg);
1396 RegInfo->setPhysRegUsed(Phys);
1398 // Check if the value being restored if available. If so, it must be
1399 // from a predecessor BB that fallthrough into this BB. We do not
1405 // ... # r1 not clobbered
1408 bool DoReMat = VRM.isReMaterialized(VirtReg);
1409 int SSorRMId = DoReMat
1410 ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
1411 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1412 unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1413 if (InReg == Phys) {
1414 // If the value is already available in the expected register, save
1415 // a reload / remat.
1417 DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
1419 DOUT << "Reusing SS#" << SSorRMId;
1420 DOUT << " from physreg "
1421 << TRI->getName(InReg) << " for vreg"
1422 << VirtReg <<" instead of reloading into physreg "
1423 << TRI->getName(Phys) << "\n";
1426 } else if (InReg && InReg != Phys) {
1428 DOUT << "Reusing RM#" << SSorRMId-VirtRegMap::MAX_STACK_SLOT-1;
1430 DOUT << "Reusing SS#" << SSorRMId;
1431 DOUT << " from physreg "
1432 << TRI->getName(InReg) << " for vreg"
1433 << VirtReg <<" by copying it into physreg "
1434 << TRI->getName(Phys) << "\n";
1436 // If the reloaded / remat value is available in another register,
1437 // copy it to the desired register.
1438 TII->copyRegToReg(MBB, &MI, Phys, InReg, RC, RC);
1440 // This invalidates Phys.
1441 Spills.ClobberPhysReg(Phys);
1442 // Remember it's available.
1443 Spills.addAvailable(SSorRMId, Phys);
1446 MachineInstr *CopyMI = prior(MII);
1447 MachineOperand *KillOpnd = CopyMI->findRegisterUseOperand(InReg);
1448 KillOpnd->setIsKill();
1449 UpdateKills(*CopyMI, RegKills, KillOps, TRI);
1451 DOUT << '\t' << *CopyMI;
1456 if (VRM.isReMaterialized(VirtReg)) {
1457 ReMaterialize(MBB, MII, Phys, VirtReg, TII, TRI, VRM);
1459 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1460 TII->loadRegFromStackSlot(MBB, &MI, Phys, SSorRMId, RC);
1461 MachineInstr *LoadMI = prior(MII);
1462 VRM.addSpillSlotUse(SSorRMId, LoadMI);
1466 // This invalidates Phys.
1467 Spills.ClobberPhysReg(Phys);
1468 // Remember it's available.
1469 Spills.addAvailable(SSorRMId, Phys);
1471 UpdateKills(*prior(MII), RegKills, KillOps, TRI);
1472 DOUT << '\t' << *prior(MII);
1476 // Insert spills here if asked to.
1477 if (VRM.isSpillPt(&MI)) {
1478 std::vector<std::pair<unsigned,bool> > &SpillRegs =
1479 VRM.getSpillPtSpills(&MI);
1480 for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
1481 unsigned VirtReg = SpillRegs[i].first;
1482 bool isKill = SpillRegs[i].second;
1483 if (!VRM.getPreSplitReg(VirtReg))
1484 continue; // Split interval spilled again.
1485 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
1486 unsigned Phys = VRM.getPhys(VirtReg);
1487 int StackSlot = VRM.getStackSlot(VirtReg);
1488 TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
1489 MachineInstr *StoreMI = next(MII);
1490 VRM.addSpillSlotUse(StackSlot, StoreMI);
1491 DOUT << "Store:\t" << *StoreMI;
1492 VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
1494 NextMII = next(MII);
1497 /// ReusedOperands - Keep track of operand reuse in case we need to undo
1499 ReuseInfo ReusedOperands(MI, TRI);
1500 SmallVector<unsigned, 4> VirtUseOps;
1501 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1502 MachineOperand &MO = MI.getOperand(i);
1503 if (!MO.isReg() || MO.getReg() == 0)
1504 continue; // Ignore non-register operands.
1506 unsigned VirtReg = MO.getReg();
1507 if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
1508 // Ignore physregs for spilling, but remember that it is used by this
1510 RegInfo->setPhysRegUsed(VirtReg);
1514 // We want to process implicit virtual register uses first.
1515 if (MO.isImplicit())
1516 // If the virtual register is implicitly defined, emit a implicit_def
1517 // before so scavenger knows it's "defined".
1518 VirtUseOps.insert(VirtUseOps.begin(), i);
1520 VirtUseOps.push_back(i);
1523 // Process all of the spilled uses and all non spilled reg references.
1524 SmallVector<int, 2> PotentialDeadStoreSlots;
1525 for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
1526 unsigned i = VirtUseOps[j];
1527 MachineOperand &MO = MI.getOperand(i);
1528 unsigned VirtReg = MO.getReg();
1529 assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
1530 "Not a virtual register?");
1532 unsigned SubIdx = MO.getSubReg();
1533 if (VRM.isAssignedReg(VirtReg)) {
1534 // This virtual register was assigned a physreg!
1535 unsigned Phys = VRM.getPhys(VirtReg);
1536 RegInfo->setPhysRegUsed(Phys);
1538 ReusedOperands.markClobbered(Phys);
1539 unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
1540 MI.getOperand(i).setReg(RReg);
1541 if (VRM.isImplicitlyDefined(VirtReg))
1542 BuildMI(MBB, &MI, MI.getDebugLoc(),
1543 TII->get(TargetInstrInfo::IMPLICIT_DEF), RReg);
1547 // This virtual register is now known to be a spilled value.
1549 continue; // Handle defs in the loop below (handle use&def here though)
1551 bool DoReMat = VRM.isReMaterialized(VirtReg);
1552 int SSorRMId = DoReMat
1553 ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
1554 int ReuseSlot = SSorRMId;
1556 // Check to see if this stack slot is available.
1557 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);
1559 // If this is a sub-register use, make sure the reuse register is in the
1560 // right register class. For example, for x86 not all of the 32-bit
1561 // registers have accessible sub-registers.
1562 // Similarly so for EXTRACT_SUBREG. Consider this:
1564 // MOV32_mr fi#1, EDI
1566 // = EXTRACT_SUBREG fi#1
1567 // fi#1 is available in EDI, but it cannot be reused because it's not in
1568 // the right register file.
1570 (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
1571 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1572 if (!RC->contains(PhysReg))
1577 // This spilled operand might be part of a two-address operand. If this
1578 // is the case, then changing it will necessarily require changing the
1579 // def part of the instruction as well. However, in some cases, we
1580 // aren't allowed to modify the reused register. If none of these cases
1582 bool CanReuse = true;
1583 int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
1585 MI.getOperand(ti).isReg() &&
1586 MI.getOperand(ti).getReg() == VirtReg) {
1587 // Okay, we have a two address operand. We can reuse this physreg as
1588 // long as we are allowed to clobber the value and there isn't an
1589 // earlier def that has already clobbered the physreg.
1590 CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
1591 !ReusedOperands.isClobbered(PhysReg);
1595 // If this stack slot value is already available, reuse it!
1596 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
1597 DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
1599 DOUT << "Reusing SS#" << ReuseSlot;
1600 DOUT << " from physreg "
1601 << TRI->getName(PhysReg) << " for vreg"
1602 << VirtReg <<" instead of reloading into physreg "
1603 << TRI->getName(VRM.getPhys(VirtReg)) << "\n";
1604 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
1605 MI.getOperand(i).setReg(RReg);
1607 // The only technical detail we have is that we don't know that
1608 // PhysReg won't be clobbered by a reloaded stack slot that occurs
1609 // later in the instruction. In particular, consider 'op V1, V2'.
1610 // If V1 is available in physreg R0, we would choose to reuse it
1611 // here, instead of reloading it into the register the allocator
1612 // indicated (say R1). However, V2 might have to be reloaded
1613 // later, and it might indicate that it needs to live in R0. When
1614 // this occurs, we need to have information available that
1615 // indicates it is safe to use R1 for the reload instead of R0.
1617 // To further complicate matters, we might conflict with an alias,
1618 // or R0 and R1 might not be compatible with each other. In this
1619 // case, we actually insert a reload for V1 in R1, ensuring that
1620 // we can get at R0 or its alias.
1621 ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
1622 VRM.getPhys(VirtReg), VirtReg);
1624 // Only mark it clobbered if this is a use&def operand.
1625 ReusedOperands.markClobbered(PhysReg);
1628 if (MI.getOperand(i).isKill() &&
1629 ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
1631 // The store of this spilled value is potentially dead, but we
1632 // won't know for certain until we've confirmed that the re-use
1633 // above is valid, which means waiting until the other operands
1634 // are processed. For now we just track the spill slot, we'll
1635 // remove it after the other operands are processed if valid.
1637 PotentialDeadStoreSlots.push_back(ReuseSlot);
1643 // Otherwise we have a situation where we have a two-address instruction
1644 // whose mod/ref operand needs to be reloaded. This reload is already
1645 // available in some register "PhysReg", but if we used PhysReg as the
1646 // operand to our 2-addr instruction, the instruction would modify
1647 // PhysReg. This isn't cool if something later uses PhysReg and expects
1648 // to get its initial value.
1650 // To avoid this problem, and to avoid doing a load right after a store,
1651 // we emit a copy from PhysReg into the designated register for this
1653 unsigned DesignatedReg = VRM.getPhys(VirtReg);
1654 assert(DesignatedReg && "Must map virtreg to physreg!");
1656 // Note that, if we reused a register for a previous operand, the
1657 // register we want to reload into might not actually be
1658 // available. If this occurs, use the register indicated by the
1660 if (ReusedOperands.hasReuses())
1661 DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
1662 Spills, MaybeDeadStores, RegKills, KillOps, VRM);
1664 // If the mapped designated register is actually the physreg we have
1665 // incoming, we don't need to inserted a dead copy.
1666 if (DesignatedReg == PhysReg) {
1667 // If this stack slot value is already available, reuse it!
1668 if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
1669 DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
1671 DOUT << "Reusing SS#" << ReuseSlot;
1672 DOUT << " from physreg " << TRI->getName(PhysReg)
1673 << " for vreg" << VirtReg
1674 << " instead of reloading into same physreg.\n";
1675 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
1676 MI.getOperand(i).setReg(RReg);
1677 ReusedOperands.markClobbered(RReg);
1682 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1683 RegInfo->setPhysRegUsed(DesignatedReg);
1684 ReusedOperands.markClobbered(DesignatedReg);
1685 TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);
1687 MachineInstr *CopyMI = prior(MII);
1688 UpdateKills(*CopyMI, RegKills, KillOps, TRI);
1690 // This invalidates DesignatedReg.
1691 Spills.ClobberPhysReg(DesignatedReg);
1693 Spills.addAvailable(ReuseSlot, DesignatedReg);
1695 SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
1696 MI.getOperand(i).setReg(RReg);
1697 DOUT << '\t' << *prior(MII);
1702 // Otherwise, reload it and remember that we have it.
1703 PhysReg = VRM.getPhys(VirtReg);
1704 assert(PhysReg && "Must map virtreg to physreg!");
1706 // Note that, if we reused a register for a previous operand, the
1707 // register we want to reload into might not actually be
1708 // available. If this occurs, use the register indicated by the
1710 if (ReusedOperands.hasReuses())
1711 PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
1712 Spills, MaybeDeadStores, RegKills, KillOps, VRM);
1714 RegInfo->setPhysRegUsed(PhysReg);
1715 ReusedOperands.markClobbered(PhysReg);
1717 ReMaterialize(MBB, MII, PhysReg, VirtReg, TII, TRI, VRM);
1719 const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
1720 TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
1721 MachineInstr *LoadMI = prior(MII);
1722 VRM.addSpillSlotUse(SSorRMId, LoadMI);
1725 // This invalidates PhysReg.
1726 Spills.ClobberPhysReg(PhysReg);
1728 // Any stores to this stack slot are not dead anymore.
1730 MaybeDeadStores[SSorRMId] = NULL;
1731 Spills.addAvailable(SSorRMId, PhysReg);
1732 // Assumes this is the last use. IsKill will be unset if reg is reused
1733 // unless it's a two-address operand.
1734 if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
1735 MI.getOperand(i).setIsKill();
1736 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
1737 MI.getOperand(i).setReg(RReg);
1738 UpdateKills(*prior(MII), RegKills, KillOps, TRI);
1739 DOUT << '\t' << *prior(MII);
1742 // Ok - now we can remove stores that have been confirmed dead.
1743 for (unsigned j = 0, e = PotentialDeadStoreSlots.size(); j != e; ++j) {
1744 // This was the last use and the spilled value is still available
1745 // for reuse. That means the spill was unnecessary!
1746 int PDSSlot = PotentialDeadStoreSlots[j];
1747 MachineInstr* DeadStore = MaybeDeadStores[PDSSlot];
1749 DOUT << "Removed dead store:\t" << *DeadStore;
1750 InvalidateKills(*DeadStore, RegKills, KillOps);
1751 VRM.RemoveMachineInstrFromMaps(DeadStore);
1752 MBB.erase(DeadStore);
1753 MaybeDeadStores[PDSSlot] = NULL;
1762 // If we have folded references to memory operands, make sure we clear all
1763 // physical registers that may contain the value of the spilled virtual
1765 SmallSet<int, 2> FoldedSS;
1766 for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ) {
1767 unsigned VirtReg = I->second.first;
1768 VirtRegMap::ModRef MR = I->second.second;
1769 DOUT << "Folded vreg: " << VirtReg << " MR: " << MR;
1771 // MI2VirtMap be can updated which invalidate the iterator.
1772 // Increment the iterator first.
1774 int SS = VRM.getStackSlot(VirtReg);
1775 if (SS == VirtRegMap::NO_STACK_SLOT)
1777 FoldedSS.insert(SS);
1778 DOUT << " - StackSlot: " << SS << "\n";
1780 // If this folded instruction is just a use, check to see if it's a
1781 // straight load from the virt reg slot.
1782 if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
1784 unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
1785 if (DestReg && FrameIdx == SS) {
1786 // If this spill slot is available, turn it into a copy (or nothing)
1787 // instead of leaving it as a load!
1788 if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
1789 DOUT << "Promoted Load To Copy: " << MI;
1790 if (DestReg != InReg) {
1791 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
1792 TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
1793 MachineOperand *DefMO = MI.findRegisterDefOperand(DestReg);
1794 unsigned SubIdx = DefMO->getSubReg();
1795 // Revisit the copy so we make sure to notice the effects of the
1796 // operation on the destreg (either needing to RA it if it's
1797 // virtual or needing to clobber any values if it's physical).
1799 --NextMII; // backtrack to the copy.
1800 // Propagate the sub-register index over.
1802 DefMO = NextMII->findRegisterDefOperand(DestReg);
1803 DefMO->setSubReg(SubIdx);
1807 MachineOperand *KillOpnd = NextMII->findRegisterUseOperand(InReg);
1808 KillOpnd->setIsKill();
1812 DOUT << "Removing now-noop copy: " << MI;
1813 // Unset last kill since it's being reused.
1814 InvalidateKill(InReg, RegKills, KillOps);
1817 InvalidateKills(MI, RegKills, KillOps);
1818 VRM.RemoveMachineInstrFromMaps(&MI);
1821 goto ProcessNextInst;
1824 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
1825 SmallVector<MachineInstr*, 4> NewMIs;
1827 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
1828 MBB.insert(MII, NewMIs[0]);
1829 InvalidateKills(MI, RegKills, KillOps);
1830 VRM.RemoveMachineInstrFromMaps(&MI);
1833 --NextMII; // backtrack to the unfolded instruction.
1835 goto ProcessNextInst;
1840 // If this reference is not a use, any previous store is now dead.
1841 // Otherwise, the store to this stack slot is not dead anymore.
1842 MachineInstr* DeadStore = MaybeDeadStores[SS];
1844 bool isDead = !(MR & VirtRegMap::isRef);
1845 MachineInstr *NewStore = NULL;
1846 if (MR & VirtRegMap::isModRef) {
1847 unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
1848 SmallVector<MachineInstr*, 4> NewMIs;
1849 // We can reuse this physreg as long as we are allowed to clobber
1850 // the value and there isn't an earlier def that has already clobbered
1853 !TII->isStoreToStackSlot(&MI, SS)) { // Not profitable!
1854 MachineOperand *KillOpnd =
1855 DeadStore->findRegisterUseOperand(PhysReg, true);
1856 // Note, if the store is storing a sub-register, it's possible the
1857 // super-register is needed below.
1858 if (KillOpnd && !KillOpnd->getSubReg() &&
1859 TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true,NewMIs)){
1860 MBB.insert(MII, NewMIs[0]);
1861 NewStore = NewMIs[1];
1862 MBB.insert(MII, NewStore);
1863 VRM.addSpillSlotUse(SS, NewStore);
1864 InvalidateKills(MI, RegKills, KillOps);
1865 VRM.RemoveMachineInstrFromMaps(&MI);
1869 --NextMII; // backtrack to the unfolded instruction.
1876 if (isDead) { // Previous store is dead.
1877 // If we get here, the store is dead, nuke it now.
1878 DOUT << "Removed dead store:\t" << *DeadStore;
1879 InvalidateKills(*DeadStore, RegKills, KillOps);
1880 VRM.RemoveMachineInstrFromMaps(DeadStore);
1881 MBB.erase(DeadStore);
1886 MaybeDeadStores[SS] = NULL;
1888 // Treat this store as a spill merged into a copy. That makes the
1889 // stack slot value available.
1890 VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
1891 goto ProcessNextInst;
1895 // If the spill slot value is available, and this is a new definition of
1896 // the value, the value is not available anymore.
1897 if (MR & VirtRegMap::isMod) {
1898 // Notice that the value in this stack slot has been modified.
1899 Spills.ModifyStackSlotOrReMat(SS);
1901 // If this is *just* a mod of the value, check to see if this is just a
1902 // store to the spill slot (i.e. the spill got merged into the copy). If
1903 // so, realize that the vreg is available now, and add the store to the
1904 // MaybeDeadStore info.
1906 if (!(MR & VirtRegMap::isRef)) {
1907 if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
1908 assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
1909 "Src hasn't been allocated yet?");
1911 if (CommuteToFoldReload(MBB, MII, VirtReg, SrcReg, StackSlot,
1912 Spills, RegKills, KillOps, TRI, VRM)) {
1913 NextMII = next(MII);
1915 goto ProcessNextInst;
1918 // Okay, this is certainly a store of SrcReg to [StackSlot]. Mark
1919 // this as a potentially dead store in case there is a subsequent
1920 // store into the stack slot without a read from it.
1921 MaybeDeadStores[StackSlot] = &MI;
1923 // If the stack slot value was previously available in some other
1924 // register, change it now. Otherwise, make the register
1925 // available in PhysReg.
1926 Spills.addAvailable(StackSlot, SrcReg, false/*!clobber*/);
1932 // Process all of the spilled defs.
1933 for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
1934 MachineOperand &MO = MI.getOperand(i);
1935 if (!(MO.isReg() && MO.getReg() && MO.isDef()))
1938 unsigned VirtReg = MO.getReg();
1939 if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
1940 // Check to see if this is a noop copy. If so, eliminate the
1941 // instruction before considering the dest reg to be changed.
1942 unsigned Src, Dst, SrcSR, DstSR;
1943 if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
1945 DOUT << "Removing now-noop copy: " << MI;
1946 SmallVector<unsigned, 2> KillRegs;
1947 InvalidateKills(MI, RegKills, KillOps, &KillRegs);
1948 if (MO.isDead() && !KillRegs.empty()) {
1949 // Source register or an implicit super/sub-register use is killed.
1950 assert(KillRegs[0] == Dst ||
1951 TRI->isSubRegister(KillRegs[0], Dst) ||
1952 TRI->isSuperRegister(KillRegs[0], Dst));
1953 // Last def is now dead.
1954 TransferDeadness(&MBB, Dist, Src, RegKills, KillOps);
1956 VRM.RemoveMachineInstrFromMaps(&MI);
1959 Spills.disallowClobberPhysReg(VirtReg);
1960 goto ProcessNextInst;
1963 // If it's not a no-op copy, it clobbers the value in the destreg.
1964 Spills.ClobberPhysReg(VirtReg);
1965 ReusedOperands.markClobbered(VirtReg);
1967 // Check to see if this instruction is a load from a stack slot into
1968 // a register. If so, this provides the stack slot value in the reg.
1970 if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
1971 assert(DestReg == VirtReg && "Unknown load situation!");
1973 // If it is a folded reference, then it's not safe to clobber.
1974 bool Folded = FoldedSS.count(FrameIdx);
1975 // Otherwise, if it wasn't available, remember that it is now!
1976 Spills.addAvailable(FrameIdx, DestReg, !Folded);
1977 goto ProcessNextInst;
1983 unsigned SubIdx = MO.getSubReg();
1984 bool DoReMat = VRM.isReMaterialized(VirtReg);
1986 ReMatDefs.insert(&MI);
1988 // The only vregs left are stack slot definitions.
1989 int StackSlot = VRM.getStackSlot(VirtReg);
1990 const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
1992 // If this def is part of a two-address operand, make sure to execute
1993 // the store from the correct physical register.
1995 int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
1997 PhysReg = MI.getOperand(TiedOp).getReg();
1999 unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
2000 assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
2001 "Can't find corresponding super-register!");
2005 PhysReg = VRM.getPhys(VirtReg);
2006 if (ReusedOperands.isClobbered(PhysReg)) {
2007 // Another def has taken the assigned physreg. It must have been a
2008 // use&def which got it due to reuse. Undo the reuse!
2009 PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
2010 Spills, MaybeDeadStores, RegKills, KillOps, VRM);
2014 assert(PhysReg && "VR not assigned a physical register?");
2015 RegInfo->setPhysRegUsed(PhysReg);
2016 unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
2017 ReusedOperands.markClobbered(RReg);
2018 MI.getOperand(i).setReg(RReg);
2021 MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
2022 SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
2023 LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
2024 NextMII = next(MII);
2026 // Check to see if this is a noop copy. If so, eliminate the
2027 // instruction before considering the dest reg to be changed.
2029 unsigned Src, Dst, SrcSR, DstSR;
2030 if (TII->isMoveInstr(MI, Src, Dst, SrcSR, DstSR) && Src == Dst) {
2032 DOUT << "Removing now-noop copy: " << MI;
2033 InvalidateKills(MI, RegKills, KillOps);
2034 VRM.RemoveMachineInstrFromMaps(&MI);
2037 UpdateKills(*LastStore, RegKills, KillOps, TRI);
2038 goto ProcessNextInst;
2044 DistanceMap.insert(std::make_pair(&MI, Dist++));
2045 if (!Erased && !BackTracked) {
2046 for (MachineBasicBlock::iterator II = &MI; II != NextMII; ++II)
2047 UpdateKills(*II, RegKills, KillOps, TRI);
2054 llvm::Spiller* llvm::createSpiller() {
2055 switch (SpillerOpt) {
2056 default: assert(0 && "Unreachable!");
2058 return new LocalSpiller();
2060 return new SimpleSpiller();