//===-- llvm/CodeGen/VirtRegMap.cpp - Virtual Register Map ----------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the VirtRegMap class.
//
// It also contains implementations of the Spiller interface, which, given a
// virtual register map and a machine function, eliminates all virtual
// references by replacing them with physical register references - adding spill
// code as necessary.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "spiller"
#include "VirtRegMap.h"
#include "llvm/Function.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetInstrInfo.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/Compiler.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include <algorithm>
using namespace llvm;
STATISTIC(NumSpills, "Number of register spills");
STATISTIC(NumReMats, "Number of re-materializations");
STATISTIC(NumDRM   , "Number of re-materializable defs elided");
STATISTIC(NumStores, "Number of stores added");
STATISTIC(NumLoads , "Number of loads added");
STATISTIC(NumReused, "Number of values reused");
STATISTIC(NumDSE   , "Number of dead stores elided");
STATISTIC(NumDCE   , "Number of copies elided");
enum SpillerName { simple, local };

static cl::opt<SpillerName>
SpillerOpt("spiller",
           cl::desc("Spiller to use: (default: local)"),
           cl::values(clEnumVal(simple, "  simple spiller"),
                      clEnumVal(local,  "  local spiller"),
                      clEnumValEnd),
           cl::init(local));
//===----------------------------------------------------------------------===//
//  VirtRegMap implementation
//===----------------------------------------------------------------------===//
VirtRegMap::VirtRegMap(MachineFunction &mf)
  : TII(*mf.getTarget().getInstrInfo()), MF(mf),
    Virt2PhysMap(NO_PHYS_REG), Virt2StackSlotMap(NO_STACK_SLOT),
    Virt2ReMatIdMap(NO_STACK_SLOT), Virt2SplitMap(0),
    Virt2SplitKillMap(0), ReMatMap(NULL), ReMatId(MAX_STACK_SLOT+1) {
  grow();
}
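
/// grow - Make sure every map in the VirtRegMap is large enough to be indexed
/// by the highest virtual register currently in the function.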
void VirtRegMap::grow() {
  unsigned LastVirtReg = MF.getRegInfo().getLastVirtReg();
  Virt2PhysMap.grow(LastVirtReg);
  Virt2StackSlotMap.grow(LastVirtReg);
  Virt2ReMatIdMap.grow(LastVirtReg);
  Virt2SplitMap.grow(LastVirtReg);
  Virt2SplitKillMap.grow(LastVirtReg);
  ReMatMap.grow(LastVirtReg);
}
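
/// assignVirt2StackSlot - Create a new stack slot for the given virtual
/// register, record the mapping, and return the new slot's frame index.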
int VirtRegMap::assignVirt2StackSlot(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  const TargetRegisterClass* RC = MF.getRegInfo().getRegClass(virtReg);
  int frameIndex = MF.getFrameInfo()->CreateStackObject(RC->getSize(),
                                                         RC->getAlignment());
  Virt2StackSlotMap[virtReg] = frameIndex;
  ++NumSpills;
  return frameIndex;
}
void VirtRegMap::assignVirt2StackSlot(unsigned virtReg, int frameIndex) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2StackSlotMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign stack slot to already spilled register");
  assert((frameIndex >= 0 ||
          (frameIndex >= MF.getFrameInfo()->getObjectIndexBegin())) &&
         "illegal fixed frame index");
  Virt2StackSlotMap[virtReg] = frameIndex;
}
int VirtRegMap::assignVirtReMatId(unsigned virtReg) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = ReMatId;
  return ReMatId++;
}
void VirtRegMap::assignVirtReMatId(unsigned virtReg, int id) {
  assert(TargetRegisterInfo::isVirtualRegister(virtReg));
  assert(Virt2ReMatIdMap[virtReg] == NO_STACK_SLOT &&
         "attempt to assign re-mat id to already spilled register");
  Virt2ReMatIdMap[virtReg] = id;
}
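
/// virtFolded - Record that a reference to VirtReg has been folded into NewMI
/// as a memory operand, transferring any references previously recorded
/// against OldMI.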
void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *OldMI,
                            MachineInstr *NewMI, ModRef MRInfo) {
  // Move previous memory references folded to new instruction.
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(NewMI);
  for (MI2VirtMapTy::iterator I = MI2VirtMap.lower_bound(OldMI),
         E = MI2VirtMap.end(); I != E && I->first == OldMI; ) {
    MI2VirtMap.insert(IP, std::make_pair(NewMI, I->second));
    MI2VirtMap.erase(I++);
  }

  // Add the new memory reference.
  MI2VirtMap.insert(IP, std::make_pair(NewMI, std::make_pair(VirtReg, MRInfo)));
}
void VirtRegMap::virtFolded(unsigned VirtReg, MachineInstr *MI, ModRef MRInfo) {
  MI2VirtMapTy::iterator IP = MI2VirtMap.lower_bound(MI);
  MI2VirtMap.insert(IP, std::make_pair(MI, std::make_pair(VirtReg, MRInfo)));
}
void VirtRegMap::print(std::ostream &OS) const {
  const TargetRegisterInfo* TRI = MF.getTarget().getRegisterInfo();

  OS << "********** REGISTER MAP **********\n";
  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i) {
    if (Virt2PhysMap[i] != (unsigned)VirtRegMap::NO_PHYS_REG)
      OS << "[reg" << i << " -> " << TRI->getPrintableName(Virt2PhysMap[i])
         << "]\n";
  }

  for (unsigned i = TargetRegisterInfo::FirstVirtualRegister,
         e = MF.getRegInfo().getLastVirtReg(); i <= e; ++i)
    if (Virt2StackSlotMap[i] != VirtRegMap::NO_STACK_SLOT)
      OS << "[reg" << i << " -> fi#" << Virt2StackSlotMap[i] << "]\n";
  OS << '\n';
}

void VirtRegMap::dump() const {
  print(cerr);
}
//===----------------------------------------------------------------------===//
// Simple Spiller Implementation
//===----------------------------------------------------------------------===//
Spiller::~Spiller() {}
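
/// SimpleSpiller - This spiller rewrites every virtual register operand in
/// place: it reloads each spilled value from its stack slot before the use
/// and stores it back to the slot after the def, making no attempt to keep
/// values in registers across instructions.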
struct VISIBILITY_HIDDEN SimpleSpiller : public Spiller {
  bool runOnMachineFunction(MachineFunction& mf, VirtRegMap &VRM);
};
bool SimpleSpiller::runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
  DOUT << "********** REWRITE MACHINE CODE **********\n";
  DOUT << "********** Function: " << MF.getFunction()->getName() << '\n';
  const TargetMachine &TM = MF.getTarget();
  const TargetInstrInfo &TII = *TM.getInstrInfo();

  // LoadedRegs - Keep track of which vregs are loaded, so that we only load
  // each vreg once (in the case where a spilled vreg is used by multiple
  // operands).  This is always smaller than the number of operands to the
  // current machine instr, so it should be small.
  std::vector<unsigned> LoadedRegs;

  for (MachineFunction::iterator MBBI = MF.begin(), E = MF.end();
       MBBI != E; ++MBBI) {
    DOUT << MBBI->getBasicBlock()->getName() << ":\n";
    MachineBasicBlock &MBB = *MBBI;
    for (MachineBasicBlock::iterator MII = MBB.begin(),
           E = MBB.end(); MII != E; ++MII) {
      MachineInstr &MI = *MII;
      for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
        MachineOperand &MO = MI.getOperand(i);
        if (MO.isRegister() && MO.getReg()) {
          if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
            unsigned VirtReg = MO.getReg();
            unsigned PhysReg = VRM.getPhys(VirtReg);
            if (!VRM.isAssignedReg(VirtReg)) {
              int StackSlot = VRM.getStackSlot(VirtReg);
              const TargetRegisterClass* RC =
                MF.getRegInfo().getRegClass(VirtReg);

              if (MO.isUse() &&
                  std::find(LoadedRegs.begin(), LoadedRegs.end(), VirtReg)
                  == LoadedRegs.end()) {
                TII.loadRegFromStackSlot(MBB, &MI, PhysReg, StackSlot, RC);
                LoadedRegs.push_back(VirtReg);
                ++NumLoads;
                DOUT << '\t' << *prior(MII);
              }

              if (MO.isDef()) {
                TII.storeRegToStackSlot(MBB, next(MII), PhysReg, true,
                                        StackSlot, RC);
                ++NumStores;
              }
            }
            MF.getRegInfo().setPhysRegUsed(PhysReg);
            MI.getOperand(i).setReg(PhysReg);
          } else {
            MF.getRegInfo().setPhysRegUsed(MO.getReg());
          }
        }
      }

      DOUT << '\t' << MI;
      LoadedRegs.clear();
    }
  }
  return true;
}
//===----------------------------------------------------------------------===//
// Local Spiller Implementation
//===----------------------------------------------------------------------===//
class AvailableSpills;

/// LocalSpiller - This spiller does a simple pass over the machine basic
/// block to attempt to keep spills in registers as much as possible for
/// blocks that have low register pressure (the vreg may be spilled due to
/// register pressure in other blocks).
class VISIBILITY_HIDDEN LocalSpiller : public Spiller {
  MachineRegisterInfo *RegInfo;
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;
public:
  bool runOnMachineFunction(MachineFunction &MF, VirtRegMap &VRM) {
    RegInfo = &MF.getRegInfo();
    TRI = MF.getTarget().getRegisterInfo();
    TII = MF.getTarget().getInstrInfo();
    DOUT << "\n**** Local spiller rewriting function '"
         << MF.getFunction()->getName() << "':\n";
    DOUT << "**** Machine Instrs (NOTE! Does not include spills and reloads!)"
         << " ****\n";

    for (MachineFunction::iterator MBB = MF.begin(), E = MF.end();
         MBB != E; ++MBB)
      RewriteMBB(*MBB, VRM);

    DOUT << "**** Post Machine Instrs ****\n";

    return true;
  }
private:
  bool PrepForUnfoldOpti(MachineBasicBlock &MBB,
                         MachineBasicBlock::iterator &MII,
                         std::vector<MachineInstr*> &MaybeDeadStores,
                         AvailableSpills &Spills, BitVector &RegKills,
                         std::vector<MachineOperand*> &KillOps,
                         VirtRegMap &VRM);
  void SpillRegToStackSlot(MachineBasicBlock &MBB,
                           MachineBasicBlock::iterator &MII,
                           int Idx, unsigned PhysReg, int StackSlot,
                           const TargetRegisterClass *RC,
                           bool isAvailable, MachineInstr *&LastStore,
                           AvailableSpills &Spills,
                           SmallSet<MachineInstr*, 4> &ReMatDefs,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM);
  void RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM);
};
/// AvailableSpills - As the local spiller is scanning and rewriting an MBB from
/// top down, keep track of which spill slots or remat'ed values are available
/// in each register.
///
/// Note that not all physregs are created equal here.  In particular, some
/// physregs are reloads that we are allowed to clobber or ignore at any time.
/// Other physregs are values that the register allocated program is using
/// that we cannot CHANGE, but we can read if we like.  We keep track of this
/// on a per-stack-slot / remat id basis as the low bit in the value of the
/// SpillSlotsAvailable entries.  The predicate 'canClobberPhysReg()' checks
/// this bit and addAvailable sets it.
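/// For illustration (register and slot values assumed, not taken from a real
/// compile): if stack slot #3 is available in EAX and may be clobbered, then
///   SpillSlotsOrReMatsAvailable[3] == (EAX << 1) | 1
/// and PhysRegsAvailable holds the inverse entry EAX -> 3.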
class VISIBILITY_HIDDEN AvailableSpills {
  const TargetRegisterInfo *TRI;
  const TargetInstrInfo *TII;

  // SpillSlotsOrReMatsAvailable - This map keeps track of all of the spilled
  // or remat'ed virtual register values that are still available, due to being
  // loaded or stored to, but not invalidated yet.
  std::map<int, unsigned> SpillSlotsOrReMatsAvailable;

  // PhysRegsAvailable - This is the inverse of SpillSlotsOrReMatsAvailable,
  // indicating which stack slot values are currently held by a physreg.  This
  // is used to invalidate entries in SpillSlotsOrReMatsAvailable when a
  // physreg is modified.
  std::multimap<unsigned, int> PhysRegsAvailable;

  void disallowClobberPhysRegOnly(unsigned PhysReg);

  void ClobberPhysRegOnly(unsigned PhysReg);
public:
  AvailableSpills(const TargetRegisterInfo *tri, const TargetInstrInfo *tii)
    : TRI(tri), TII(tii) {
  }
  const TargetRegisterInfo *getRegInfo() const { return TRI; }

  /// getSpillSlotOrReMatPhysReg - If the specified stack slot or remat is
  /// available in a physical register, return that PhysReg, otherwise
  /// return 0.
  unsigned getSpillSlotOrReMatPhysReg(int Slot) const {
    std::map<int, unsigned>::const_iterator I =
      SpillSlotsOrReMatsAvailable.find(Slot);
    if (I != SpillSlotsOrReMatsAvailable.end()) {
      return I->second >> 1;  // Remove the CanClobber bit.
    }
    return 0;
  }
  /// addAvailable - Mark that the specified stack slot / remat is available in
  /// the specified physreg.  If CanClobber is true, the physreg can be modified
  /// at any time without changing the semantics of the program.
  void addAvailable(int SlotOrReMat, MachineInstr *MI, unsigned Reg,
                    bool CanClobber = true) {
    // If this stack slot is thought to be available in some other physreg,
    // remove its record.
    ModifyStackSlotOrReMat(SlotOrReMat);

    PhysRegsAvailable.insert(std::make_pair(Reg, SlotOrReMat));
    SpillSlotsOrReMatsAvailable[SlotOrReMat]= (Reg << 1) | (unsigned)CanClobber;

    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "Remembering RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1;
    else
      DOUT << "Remembering SS#" << SlotOrReMat;
    DOUT << " in physreg " << TRI->getPrintableName(Reg) << "\n";
  }
  /// canClobberPhysReg - Return true if the spiller is allowed to change the
  /// value of the specified stackslot register if it desires.  The specified
  /// stack slot must be available in a physreg for this query to make sense.
  bool canClobberPhysReg(int SlotOrReMat) const {
    assert(SpillSlotsOrReMatsAvailable.count(SlotOrReMat) &&
           "Value not available!");
    return SpillSlotsOrReMatsAvailable.find(SlotOrReMat)->second & 1;
  }

  /// disallowClobberPhysReg - Unset the CanClobber bit of the specified
  /// stackslot register.  The register is still available but is no longer
  /// allowed to be modified.
  void disallowClobberPhysReg(unsigned PhysReg);

  /// ClobberPhysReg - This is called when the specified physreg changes
  /// value.  We use this to invalidate any info about stuff that lives in
  /// it and any of its aliases.
  void ClobberPhysReg(unsigned PhysReg);

  /// ModifyStackSlotOrReMat - This method is called when the value in a stack
  /// slot changes.  This removes information about which register the previous
  /// value for this slot lives in (as the previous value is dead now).
  void ModifyStackSlotOrReMat(int SlotOrReMat);
};
/// disallowClobberPhysRegOnly - Unset the CanClobber bit of the specified
/// stackslot register.  The register is still available but is no longer
/// allowed to be modified.
void AvailableSpills::disallowClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    ++I;
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable[SlotOrReMat] &= ~1;
    DOUT << "PhysReg " << TRI->getPrintableName(PhysReg)
         << " copied, it is available for use but can no longer be modified\n";
  }
}
/// disallowClobberPhysReg - Unset the CanClobber bit of the specified
/// stackslot register and its aliases.  The register and its aliases may
/// still be available but are no longer allowed to be modified.
void AvailableSpills::disallowClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    disallowClobberPhysRegOnly(*AS);
  disallowClobberPhysRegOnly(PhysReg);
}
/// ClobberPhysRegOnly - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in it.
void AvailableSpills::ClobberPhysRegOnly(unsigned PhysReg) {
  std::multimap<unsigned, int>::iterator I =
    PhysRegsAvailable.lower_bound(PhysReg);
  while (I != PhysRegsAvailable.end() && I->first == PhysReg) {
    int SlotOrReMat = I->second;
    PhysRegsAvailable.erase(I++);
    assert((SpillSlotsOrReMatsAvailable[SlotOrReMat] >> 1) == PhysReg &&
           "Bidirectional map mismatch!");
    SpillSlotsOrReMatsAvailable.erase(SlotOrReMat);
    DOUT << "PhysReg " << TRI->getPrintableName(PhysReg)
         << " clobbered, invalidating ";
    if (SlotOrReMat > VirtRegMap::MAX_STACK_SLOT)
      DOUT << "RM#" << SlotOrReMat-VirtRegMap::MAX_STACK_SLOT-1 << "\n";
    else
      DOUT << "SS#" << SlotOrReMat << "\n";
  }
}
/// ClobberPhysReg - This is called when the specified physreg changes
/// value.  We use this to invalidate any info about stuff we think lives in
/// it and any of its aliases.
void AvailableSpills::ClobberPhysReg(unsigned PhysReg) {
  for (const unsigned *AS = TRI->getAliasSet(PhysReg); *AS; ++AS)
    ClobberPhysRegOnly(*AS);
  ClobberPhysRegOnly(PhysReg);
}
/// ModifyStackSlotOrReMat - This method is called when the value in a stack
/// slot changes.  This removes information about which register the previous
/// value for this slot lives in (as the previous value is dead now).
void AvailableSpills::ModifyStackSlotOrReMat(int SlotOrReMat) {
  std::map<int, unsigned>::iterator It =
    SpillSlotsOrReMatsAvailable.find(SlotOrReMat);
  if (It == SpillSlotsOrReMatsAvailable.end()) return;
  unsigned Reg = It->second >> 1;
  SpillSlotsOrReMatsAvailable.erase(It);

  // This register may hold the value of multiple stack slots, only remove this
  // stack slot from the set of values the register contains.
  std::multimap<unsigned, int>::iterator I = PhysRegsAvailable.lower_bound(Reg);
  for (; ; ++I) {
    assert(I != PhysRegsAvailable.end() && I->first == Reg &&
           "Map inverse broken!");
    if (I->second == SlotOrReMat) break;
  }
  PhysRegsAvailable.erase(I);
}
/// InvalidateKills - MI is going to be deleted. If any of its operands are
/// marked kill, then invalidate the information.
static void InvalidateKills(MachineInstr &MI, BitVector &RegKills,
                            std::vector<MachineOperand*> &KillOps,
                            SmallVector<unsigned, 2> *KillRegs = NULL) {
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse() || !MO.isKill())
      continue;
    unsigned Reg = MO.getReg();
    if (KillRegs)
      KillRegs->push_back(Reg);
    if (KillOps[Reg] == &MO) {
      RegKills.reset(Reg);
      KillOps[Reg] = NULL;
    }
  }
}
/// InvalidateKill - A MI that defines the specified register is being deleted,
/// invalidate the register kill information.
static void InvalidateKill(unsigned Reg, BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps) {
  if (RegKills[Reg]) {
    KillOps[Reg]->setIsKill(false);
    KillOps[Reg] = NULL;
    RegKills.reset(Reg);
  }
}
/// InvalidateRegDef - If the def operand of the specified def MI is now dead
/// (since its spill instruction is removed), mark it isDead. Also checks if
/// the def MI has other definition operands that are not dead. Returns it by
/// reference.
static bool InvalidateRegDef(MachineBasicBlock::iterator I,
                             MachineInstr &NewDef, unsigned Reg,
                             bool &HasLiveDef) {
  // Due to remat, it's possible this reg isn't being reused. That is,
  // the def of this reg (by prev MI) is now dead.
  MachineInstr *DefMI = I;
  MachineOperand *DefOp = NULL;
  for (unsigned i = 0, e = DefMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = DefMI->getOperand(i);
    if (MO.isRegister() && MO.isDef()) {
      if (MO.getReg() == Reg)
        DefOp = &MO;
      else if (!MO.isDead())
        HasLiveDef = true;
    }
  }
  if (!DefOp)
    return false;

  bool FoundUse = false, Done = false;
  MachineBasicBlock::iterator E = NewDef;
  ++I; ++E;
  for (; !Done && I != E; ++I) {
    MachineInstr *NMI = I;
    for (unsigned j = 0, ee = NMI->getNumOperands(); j != ee; ++j) {
      MachineOperand &MO = NMI->getOperand(j);
      if (!MO.isRegister() || MO.getReg() != Reg)
        continue;
      if (MO.isUse())
        FoundUse = true;
      Done = true; // Stop after scanning all the operands of this MI.
    }
  }
  if (FoundUse)
    return false;

  // The def of this register is now dead; mark the operand as such.
  DefOp->setIsDead();
  return true;
}
/// UpdateKills - Track and update kill info. If a MI reads a register that is
/// marked kill, then it must be due to register reuse. Transfer the kill info
/// over.
static void UpdateKills(MachineInstr &MI, BitVector &RegKills,
                        std::vector<MachineOperand*> &KillOps) {
  const TargetInstrDesc &TID = MI.getDesc();
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0)
      continue;

    if (RegKills[Reg] && KillOps[Reg]->getParent() != &MI) {
      // That can't be right. Register is killed but not re-defined and it's
      // being reused. Let's fix that.
      KillOps[Reg]->setIsKill(false);
      KillOps[Reg] = NULL;
      RegKills.reset(Reg);
      if (i < TID.getNumOperands() &&
          TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        // Unless it's a two-address operand, this is the new kill.
        MO.setIsKill();
    }
    if (MO.isKill()) {
      RegKills.set(Reg);
      KillOps[Reg] = &MO;
    }
  }

  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || !MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    RegKills.reset(Reg);
    KillOps[Reg] = NULL;
  }
}
/// ReMaterialize - Re-materialize definition for Reg targeting DestReg.
///
static void ReMaterialize(MachineBasicBlock &MBB,
                          MachineBasicBlock::iterator &MII,
                          unsigned DestReg, unsigned Reg,
                          const TargetRegisterInfo *TRI,
                          VirtRegMap &VRM) {
  TRI->reMaterialize(MBB, MII, DestReg, VRM.getReMaterializedMI(Reg));
  MachineInstr *NewMI = prior(MII);
  for (unsigned i = 0, e = NewMI->getNumOperands(); i != e; ++i) {
    MachineOperand &MO = NewMI->getOperand(i);
    if (!MO.isRegister() || MO.getReg() == 0)
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg))
      continue;

    unsigned SubIdx = MO.getSubReg();
    unsigned Phys = VRM.getPhys(VirtReg);
    assert(Phys);
    unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
    MO.setReg(RReg);
  }
  ++NumReMats;
}
// ReusedOp - For each reused operand, we keep track of a bit of information, in
// case we need to rollback upon processing a new operand. See comments below.
struct ReusedOp {
  // The MachineInstr operand that reused an available value.
  unsigned Operand;

  // StackSlotOrReMat - The spill slot or remat id of the value being reused.
  unsigned StackSlotOrReMat;

  // PhysRegReused - The physical register the value was available in.
  unsigned PhysRegReused;

  // AssignedPhysReg - The physreg that was assigned for use by the reload.
  unsigned AssignedPhysReg;

  // VirtReg - The virtual register itself.
  unsigned VirtReg;

  ReusedOp(unsigned o, unsigned ss, unsigned prr, unsigned apr,
           unsigned vreg)
    : Operand(o), StackSlotOrReMat(ss), PhysRegReused(prr),
      AssignedPhysReg(apr), VirtReg(vreg) {}
};
/// ReuseInfo - This maintains a collection of ReuseOp's for each operand that
/// is reused instead of reloaded.
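/// If a later operand needs a physreg that an earlier operand chose to reuse,
/// GetRegForReload consults this collection to undo the reuse by emitting an
/// explicit reload for the earlier operand.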
class VISIBILITY_HIDDEN ReuseInfo {
  MachineInstr &MI;
  std::vector<ReusedOp> Reuses;
  BitVector PhysRegsClobbered;
public:
  ReuseInfo(MachineInstr &mi, const TargetRegisterInfo *tri) : MI(mi) {
    PhysRegsClobbered.resize(tri->getNumRegs());
  }

  bool hasReuses() const {
    return !Reuses.empty();
  }

  /// addReuse - If we choose to reuse a virtual register that is already
  /// available instead of reloading it, remember that we did so.
  void addReuse(unsigned OpNo, unsigned StackSlotOrReMat,
                unsigned PhysRegReused, unsigned AssignedPhysReg,
                unsigned VirtReg) {
    // If the reload is to the assigned register anyway, no undo will be
    // required.
    if (PhysRegReused == AssignedPhysReg) return;

    // Otherwise, remember this.
    Reuses.push_back(ReusedOp(OpNo, StackSlotOrReMat, PhysRegReused,
                              AssignedPhysReg, VirtReg));
  }

  void markClobbered(unsigned PhysReg) {
    PhysRegsClobbered.set(PhysReg);
  }

  bool isClobbered(unsigned PhysReg) const {
    return PhysRegsClobbered.test(PhysReg);
  }
  /// GetRegForReload - We are about to emit a reload into PhysReg.  If there
  /// is some other operand that is using the specified register, either pick
  /// a new register to use, or evict the previous reload and use this reg.
  unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           SmallSet<unsigned, 8> &Rejected,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    const TargetInstrInfo* TII = MI->getParent()->getParent()->getTarget()
                                   .getInstrInfo();

    if (Reuses.empty()) return PhysReg;  // This is most often empty.
    for (unsigned ro = 0, e = Reuses.size(); ro != e; ++ro) {
      ReusedOp &Op = Reuses[ro];
      // If we find some other reuse that was supposed to use this register
      // exactly for its reload, we can change this reload to use ITS reload
      // register. That is, unless its reload register has already been
      // considered and subsequently rejected because it has also been reused
      // by another operand.
      if (Op.PhysRegReused == PhysReg &&
          Rejected.count(Op.AssignedPhysReg) == 0) {
        // Yup, use the reload register that we didn't use before.
        unsigned NewReg = Op.AssignedPhysReg;
        Rejected.insert(PhysReg);
        return GetRegForReload(NewReg, MI, Spills, MaybeDeadStores, Rejected,
                               RegKills, KillOps, VRM);
      } else {
        // Otherwise, we might also have a problem if a previously reused
        // value aliases the new register.  If so, codegen the previous reload
        // and use this one.
        unsigned PRRU = Op.PhysRegReused;
        const TargetRegisterInfo *TRI = Spills.getRegInfo();
        if (TRI->areAliases(PRRU, PhysReg)) {
          // Okay, we found out that an alias of a reused register
          // was used.  This isn't good because it means we have
          // to undo a previous reuse.
          MachineBasicBlock *MBB = MI->getParent();
          const TargetRegisterClass *AliasRC =
            MBB->getParent()->getRegInfo().getRegClass(Op.VirtReg);

          // Copy Op out of the vector and remove it, we're going to insert an
          // explicit load for it.
          ReusedOp NewOp = Op;
          Reuses.erase(Reuses.begin()+ro);

          // Ok, we're going to try to reload the assigned physreg into the
          // slot that we were supposed to in the first place.  However, that
          // register could hold a reuse.  Check to see if it conflicts or
          // would prefer us to use a different register.
          unsigned NewPhysReg = GetRegForReload(NewOp.AssignedPhysReg,
                                                MI, Spills, MaybeDeadStores,
                                            Rejected, RegKills, KillOps, VRM);

          MachineBasicBlock::iterator MII = MI;
          if (NewOp.StackSlotOrReMat > VirtRegMap::MAX_STACK_SLOT) {
            ReMaterialize(*MBB, MII, NewPhysReg, NewOp.VirtReg, TRI, VRM);
          } else {
            TII->loadRegFromStackSlot(*MBB, MII, NewPhysReg,
                                      NewOp.StackSlotOrReMat, AliasRC);
            // Any stores to this stack slot are not dead anymore.
            MaybeDeadStores[NewOp.StackSlotOrReMat] = NULL;
          }
          Spills.ClobberPhysReg(NewPhysReg);
          Spills.ClobberPhysReg(NewOp.PhysRegReused);

          MI->getOperand(NewOp.Operand).setReg(NewPhysReg);

          Spills.addAvailable(NewOp.StackSlotOrReMat, MI, NewPhysReg);
          UpdateKills(*MII, RegKills, KillOps);
          DOUT << '\t' << *MII;

          DOUT << "Reuse undone!\n";
          --NumReused;

          // Finally, PhysReg is now available, go ahead and use it.
          return PhysReg;
        }
      }
    }
    return PhysReg;
  }
  /// GetRegForReload - Helper for the above GetRegForReload(). Add a
  /// 'Rejected' set to remember which registers have been considered and
  /// rejected for the reload. This avoids infinite looping in cases like this:
  /// t1 := op t2, t3
  /// t2 <- assigned r0 for use by the reload but ended up reuse r1
  /// t3 <- assigned r1 for use by the reload but ended up reuse r0
  /// t1 <- desires r1
  ///       sees r1 is taken by t2, tries t2's reload register r0
  ///       sees r0 is taken by t3, tries t3's reload register r1
  ///       sees r1 is taken by t2, tries t2's reload register r0 ...
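  /// Because each rejected register is inserted into 'Rejected' before
  /// recursing, every physreg is considered at most once and the recursion
  /// terminates.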
  unsigned GetRegForReload(unsigned PhysReg, MachineInstr *MI,
                           AvailableSpills &Spills,
                           std::vector<MachineInstr*> &MaybeDeadStores,
                           BitVector &RegKills,
                           std::vector<MachineOperand*> &KillOps,
                           VirtRegMap &VRM) {
    SmallSet<unsigned, 8> Rejected;
    return GetRegForReload(PhysReg, MI, Spills, MaybeDeadStores, Rejected,
                           RegKills, KillOps, VRM);
  }
};
/// PrepForUnfoldOpti - Turn a store folding instruction into a load folding
/// instruction. e.g.
///     movl %eax, -32(%ebp)
///     movl -36(%ebp), %eax
///     orl  %eax, -32(%ebp)
/// ==>
///     orl  -36(%ebp), %eax
///     mov  %eax, -32(%ebp)
/// This enables unfolding optimization for a subsequent instruction which will
/// also eliminate the newly introduced store instruction.
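/// Returns true if MI was replaced by a load-folding instruction, in which
/// case MII is updated to point at the replacement.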
bool LocalSpiller::PrepForUnfoldOpti(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MII,
                                    std::vector<MachineInstr*> &MaybeDeadStores,
                                     AvailableSpills &Spills,
                                     BitVector &RegKills,
                                     std::vector<MachineOperand*> &KillOps,
                                     VirtRegMap &VRM) {
  MachineFunction &MF = *MBB.getParent();
  MachineInstr &MI = *MII;
  unsigned UnfoldedOpc = 0;
  unsigned UnfoldPR = 0;
  unsigned UnfoldVR = 0;
  int FoldedSS = VirtRegMap::NO_STACK_SLOT;
  VirtRegMap::MI2VirtMapTy::const_iterator I, End;
  for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
    // Only transform a MI that folds a single register.
    if (UnfoldedOpc)
      return false;
    UnfoldVR = I->second.first;
    VirtRegMap::ModRef MR = I->second.second;
    if (VRM.isAssignedReg(UnfoldVR))
      continue;
    // If this reference is not a use, any previous store is now dead.
    // Otherwise, the store to this stack slot is not dead anymore.
    FoldedSS = VRM.getStackSlot(UnfoldVR);
    MachineInstr* DeadStore = MaybeDeadStores[FoldedSS];
    if (DeadStore && (MR & VirtRegMap::isModRef)) {
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(FoldedSS);
      if (!PhysReg ||
          DeadStore->findRegisterUseOperandIdx(PhysReg, true) == -1)
        return false;
      UnfoldPR = PhysReg;
      UnfoldedOpc = TII->getOpcodeAfterMemoryUnfold(MI.getOpcode(),
                                                    false, true);
    }
  }
  if (!UnfoldedOpc)
    return false;
  for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
    MachineOperand &MO = MI.getOperand(i);
    if (!MO.isRegister() || MO.getReg() == 0 || !MO.isUse())
      continue;
    unsigned VirtReg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(VirtReg) || MO.getSubReg())
      continue;
    if (VRM.isAssignedReg(VirtReg)) {
      unsigned PhysReg = VRM.getPhys(VirtReg);
      if (PhysReg && TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
    } else if (VRM.isReMaterialized(VirtReg))
      continue;
    int SS = VRM.getStackSlot(VirtReg);
    unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
    if (PhysReg) {
      if (TRI->regsOverlap(PhysReg, UnfoldPR))
        return false;
      continue;
    }
    PhysReg = VRM.getPhys(VirtReg);
    if (!TRI->regsOverlap(PhysReg, UnfoldPR))
      continue;

    // Ok, we'll need to reload the value into a register which makes
    // it impossible to perform the store unfolding optimization later.
    // Let's see if it is possible to fold the load if the store is
    // unfolded. This allows us to perform the store unfolding
    // optimization.
    SmallVector<MachineInstr*, 4> NewMIs;
    if (TII->unfoldMemoryOperand(MF, &MI, UnfoldVR, false, false, NewMIs)) {
      assert(NewMIs.size() == 1);
      MachineInstr *NewMI = NewMIs.back();
      int Idx = NewMI->findRegisterUseOperandIdx(VirtReg);
      SmallVector<unsigned, 2> Ops;
      Ops.push_back(Idx);
      MachineInstr *FoldedMI = TII->foldMemoryOperand(MF, NewMI, Ops, SS);
      if (FoldedMI) {
        if (!VRM.hasPhys(UnfoldVR))
          VRM.assignVirt2Phys(UnfoldVR, UnfoldPR);
        VRM.virtFolded(VirtReg, FoldedMI, VirtRegMap::isRef);
        MII = MBB.insert(MII, FoldedMI);
        VRM.RemoveMachineInstrFromMaps(&MI);
        MBB.erase(&MI);
        return true;
      }
    }
  }
  return false;
}
/// findSuperReg - Find the SubReg's super-register of given register class
/// where its SubIdx sub-register is SubReg.
static unsigned findSuperReg(const TargetRegisterClass *RC, unsigned SubReg,
                             unsigned SubIdx, const TargetRegisterInfo *TRI) {
  for (TargetRegisterClass::iterator I = RC->begin(), E = RC->end();
       I != E; ++I) {
    unsigned Reg = *I;
    if (TRI->getSubReg(Reg, SubIdx) == SubReg)
      return Reg;
  }
  return 0;
}
/// SpillRegToStackSlot - Spill a register to a specified stack slot.  Check if
/// the last store to the same slot is now dead. If so, remove the last store.
void LocalSpiller::SpillRegToStackSlot(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MII,
                                  int Idx, unsigned PhysReg, int StackSlot,
                                  const TargetRegisterClass *RC,
                                  bool isAvailable, MachineInstr *&LastStore,
                                  AvailableSpills &Spills,
                                  SmallSet<MachineInstr*, 4> &ReMatDefs,
                                  BitVector &RegKills,
                                  std::vector<MachineOperand*> &KillOps,
                                  VirtRegMap &VRM) {
  TII->storeRegToStackSlot(MBB, next(MII), PhysReg, true, StackSlot, RC);
  DOUT << "Store:\t" << *next(MII);

  // If there is a dead store to this stack slot, nuke it now.
  if (LastStore) {
    DOUT << "Removed dead store:\t" << *LastStore;
    ++NumDSE;
    SmallVector<unsigned, 2> KillRegs;
    InvalidateKills(*LastStore, RegKills, KillOps, &KillRegs);
    MachineBasicBlock::iterator PrevMII = LastStore;
    bool CheckDef = PrevMII != MBB.begin();
    if (CheckDef)
      --PrevMII;
    MBB.erase(LastStore);
    VRM.RemoveMachineInstrFromMaps(LastStore);
    if (CheckDef) {
      // Look at defs of killed registers on the store. Mark the defs
      // as dead since the store has been deleted and they aren't
      // being reused.
      for (unsigned j = 0, ee = KillRegs.size(); j != ee; ++j) {
        bool HasOtherDef = false;
        if (InvalidateRegDef(PrevMII, *MII, KillRegs[j], HasOtherDef)) {
          MachineInstr *DeadDef = PrevMII;
          if (ReMatDefs.count(DeadDef) && !HasOtherDef) {
            // FIXME: This assumes a remat def does not have side
            // effects.
            MBB.erase(DeadDef);
            VRM.RemoveMachineInstrFromMaps(DeadDef);
            ++NumDRM;
          }
        }
      }
    }
  }

  LastStore = next(MII);

  // If the stack slot value was previously available in some other
  // register, change it now.  Otherwise, make the register available,
  // in PhysReg.
  Spills.ModifyStackSlotOrReMat(StackSlot);
  Spills.ClobberPhysReg(PhysReg);
  Spills.addAvailable(StackSlot, LastStore, PhysReg, isAvailable);
  ++NumStores;
}
/// rewriteMBB - Keep track of which spills are available even after the
/// register allocator is done with them.  If possible, avoid reloading vregs.
void LocalSpiller::RewriteMBB(MachineBasicBlock &MBB, VirtRegMap &VRM) {
  DOUT << MBB.getBasicBlock()->getName() << ":\n";
  MachineFunction &MF = *MBB.getParent();

  // Spills - Keep track of which spilled values are available in physregs so
  // that we can choose to reuse the physregs instead of emitting reloads.
  AvailableSpills Spills(TRI, TII);

  // MaybeDeadStores - When we need to write a value back into a stack slot,
  // keep track of the inserted store.  If the stack slot value is never read
  // (because the value was used from some available register, for example), and
  // subsequently stored to, the original store is dead.  This map keeps track
  // of inserted stores that are not used.  If we see a subsequent store to the
  // same stack slot, the original store is deleted.
  std::vector<MachineInstr*> MaybeDeadStores;
  MaybeDeadStores.resize(MF.getFrameInfo()->getObjectIndexEnd(), NULL);

  // ReMatDefs - These are rematerializable def MIs which are not deleted.
  SmallSet<MachineInstr*, 4> ReMatDefs;
  // Keep track of kill information.
  BitVector RegKills(TRI->getNumRegs());
  std::vector<MachineOperand*> KillOps;
  KillOps.resize(TRI->getNumRegs(), NULL);
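  // RegKills[Reg] is set when the last use of Reg seen so far in this block
  // was marked as a kill; KillOps[Reg] points at that operand so the kill
  // marker can be moved or cleared if the value turns out to be reused.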
  for (MachineBasicBlock::iterator MII = MBB.begin(), E = MBB.end();
       MII != E; ) {
    MachineBasicBlock::iterator NextMII = MII; ++NextMII;

    VirtRegMap::MI2VirtMapTy::const_iterator I, End;
    bool Erased = false;
    bool BackTracked = false;
    if (PrepForUnfoldOpti(MBB, MII,
                          MaybeDeadStores, Spills, RegKills, KillOps, VRM))
      NextMII = next(MII);
    MachineInstr &MI = *MII;
    const TargetInstrDesc &TID = MI.getDesc();

    // Insert restores here if asked to.
    if (VRM.isRestorePt(&MI)) {
      std::vector<unsigned> &RestoreRegs = VRM.getRestorePtRestores(&MI);
      for (unsigned i = 0, e = RestoreRegs.size(); i != e; ++i) {
        unsigned VirtReg = RestoreRegs[e-i-1];  // Reverse order.
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (VRM.isReMaterialized(VirtReg)) {
          ReMaterialize(MBB, MII, Phys, VirtReg, TRI, VRM);
        } else {
          const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
          TII->loadRegFromStackSlot(MBB, &MI, Phys, VRM.getStackSlot(VirtReg),
                                    RC);
          ++NumLoads;
        }
        // This invalidates Phys.
        Spills.ClobberPhysReg(Phys);
        UpdateKills(*prior(MII), RegKills, KillOps);
        DOUT << '\t' << *prior(MII);
      }
    }
    // Insert spills here if asked to.
    if (VRM.isSpillPt(&MI)) {
      std::vector<std::pair<unsigned,bool> > &SpillRegs =
        VRM.getSpillPtSpills(&MI);
      for (unsigned i = 0, e = SpillRegs.size(); i != e; ++i) {
        unsigned VirtReg = SpillRegs[i].first;
        bool isKill = SpillRegs[i].second;
        if (!VRM.getPreSplitReg(VirtReg))
          continue; // Split interval spilled again.
        const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
        unsigned Phys = VRM.getPhys(VirtReg);
        int StackSlot = VRM.getStackSlot(VirtReg);
        TII->storeRegToStackSlot(MBB, next(MII), Phys, isKill, StackSlot, RC);
        MachineInstr *StoreMI = next(MII);
        DOUT << "Store:\t" << *StoreMI;
        VRM.virtFolded(VirtReg, StoreMI, VirtRegMap::isMod);
      }
      NextMII = next(MII);
    }
    /// ReusedOperands - Keep track of operand reuse in case we need to undo
    /// reuse.
    ReuseInfo ReusedOperands(MI, TRI);
    SmallVector<unsigned, 4> VirtUseOps;
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!MO.isRegister() || MO.getReg() == 0)
        continue;   // Ignore non-register operands.

      unsigned VirtReg = MO.getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VirtReg)) {
        // Ignore physregs for spilling, but remember that it is used by this
        // function.
        RegInfo->setPhysRegUsed(VirtReg);
        continue;
      }

      // We want to process implicit virtual register uses first.
      if (MO.isImplicit())
        VirtUseOps.insert(VirtUseOps.begin(), i);
      else
        VirtUseOps.push_back(i);
    }
    // Process all of the spilled uses and all non spilled reg references.
    for (unsigned j = 0, e = VirtUseOps.size(); j != e; ++j) {
      unsigned i = VirtUseOps[j];
      MachineOperand &MO = MI.getOperand(i);
      unsigned VirtReg = MO.getReg();
      assert(TargetRegisterInfo::isVirtualRegister(VirtReg) &&
             "Not a virtual register?");

      unsigned SubIdx = MO.getSubReg();
      if (VRM.isAssignedReg(VirtReg)) {
        // This virtual register was assigned a physreg!
        unsigned Phys = VRM.getPhys(VirtReg);
        RegInfo->setPhysRegUsed(Phys);
        if (MO.isDef())
          ReusedOperands.markClobbered(Phys);
        unsigned RReg = SubIdx ? TRI->getSubReg(Phys, SubIdx) : Phys;
        MI.getOperand(i).setReg(RReg);
        continue;
      }

      // This virtual register is now known to be a spilled value.
      if (!MO.isUse())
        continue;  // Handle defs in the loop below (handle use&def here though)
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      int SSorRMId = DoReMat
        ? VRM.getReMatId(VirtReg) : VRM.getStackSlot(VirtReg);
      int ReuseSlot = SSorRMId;

      // Check to see if this stack slot is available.
      unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SSorRMId);

      // If this is a sub-register use, make sure the reuse register is in the
      // right register class. For example, for x86 not all of the 32-bit
      // registers have accessible sub-registers.
      // Similarly so for EXTRACT_SUBREG. Consider this:
      // EDI = op
      // MOV32_mr fi#1, EDI
      // ...
      //       = EXTRACT_SUBREG fi#1
      // fi#1 is available in EDI, but it cannot be reused because it's not in
      // the right register file.
      if (PhysReg &&
          (SubIdx || MI.getOpcode() == TargetInstrInfo::EXTRACT_SUBREG)) {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        if (!RC->contains(PhysReg))
          PhysReg = 0;
      }

      if (PhysReg) {
        // This spilled operand might be part of a two-address operand.  If this
        // is the case, then changing it will necessarily require changing the
        // def part of the instruction as well.  However, in some cases, we
        // aren't allowed to modify the reused register.  If none of these cases
        // apply, reuse it.
        bool CanReuse = true;
        int ti = TID.getOperandConstraint(i, TOI::TIED_TO);
        if (ti != -1 &&
            MI.getOperand(ti).isRegister() &&
            MI.getOperand(ti).getReg() == VirtReg) {
          // Okay, we have a two address operand.  We can reuse this physreg as
          // long as we are allowed to clobber the value and there isn't an
          // earlier def that has already clobbered the physreg.
          CanReuse = Spills.canClobberPhysReg(ReuseSlot) &&
            !ReusedOperands.isClobbered(PhysReg);
        }

        if (CanReuse) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg "
               << TRI->getPrintableName(PhysReg) << " for vreg"
               << VirtReg <<" instead of reloading into physreg "
               << TRI->getPrintableName(VRM.getPhys(VirtReg)) << "\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);

          // The only technical detail we have is that we don't know that
          // PhysReg won't be clobbered by a reloaded stack slot that occurs
          // later in the instruction.  In particular, consider 'op V1, V2'.
          // If V1 is available in physreg R0, we would choose to reuse it
          // here, instead of reloading it into the register the allocator
          // indicated (say R1).  However, V2 might have to be reloaded
          // later, and it might indicate that it needs to live in R0.  When
          // this occurs, we need to have information available that
          // indicates it is safe to use R1 for the reload instead of R0.
          //
          // To further complicate matters, we might conflict with an alias,
          // or R0 and R1 might not be compatible with each other.  In this
          // case, we actually insert a reload for V1 in R1, ensuring that
          // we can get at R0 or its alias.
          ReusedOperands.addReuse(i, ReuseSlot, PhysReg,
                                  VRM.getPhys(VirtReg), VirtReg);
          if (ti != -1)
            // Only mark it clobbered if this is a use&def operand.
            ReusedOperands.markClobbered(PhysReg);
          ++NumReused;

          if (MI.getOperand(i).isKill() &&
              ReuseSlot <= VirtRegMap::MAX_STACK_SLOT) {
            // This was the last use and the spilled value is still available
            // for reuse. That means the spill was unnecessary!
            MachineInstr* DeadStore = MaybeDeadStores[ReuseSlot];
            if (DeadStore) {
              DOUT << "Removed dead store:\t" << *DeadStore;
              InvalidateKills(*DeadStore, RegKills, KillOps);
              VRM.RemoveMachineInstrFromMaps(DeadStore);
              MBB.erase(DeadStore);
              MaybeDeadStores[ReuseSlot] = NULL;
              ++NumDSE;
            }
          }
          continue;
        }
        // Otherwise we have a situation where we have a two-address instruction
        // whose mod/ref operand needs to be reloaded.  This reload is already
        // available in some register "PhysReg", but if we used PhysReg as the
        // operand to our 2-addr instruction, the instruction would modify
        // PhysReg.  This isn't cool if something later uses PhysReg and expects
        // to get its initial value.
        //
        // To avoid this problem, and to avoid doing a load right after a store,
        // we emit a copy from PhysReg into the designated register for this
        // operand.
        unsigned DesignatedReg = VRM.getPhys(VirtReg);
        assert(DesignatedReg && "Must map virtreg to physreg!");

        // Note that, if we reused a register for a previous operand, the
        // register we want to reload into might not actually be
        // available.  If this occurs, use the register indicated by the
        // reuser.
        if (ReusedOperands.hasReuses())
          DesignatedReg = ReusedOperands.GetRegForReload(DesignatedReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

        // If the mapped designated register is actually the physreg we have
        // incoming, we don't need to insert a dead copy.
        if (DesignatedReg == PhysReg) {
          // If this stack slot value is already available, reuse it!
          if (ReuseSlot > VirtRegMap::MAX_STACK_SLOT)
            DOUT << "Reusing RM#" << ReuseSlot-VirtRegMap::MAX_STACK_SLOT-1;
          else
            DOUT << "Reusing SS#" << ReuseSlot;
          DOUT << " from physreg " << TRI->getPrintableName(PhysReg)
               << " for vreg" << VirtReg
               << " instead of reloading into same physreg.\n";
          unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
          MI.getOperand(i).setReg(RReg);
          ReusedOperands.markClobbered(RReg);
          ++NumReused;
          continue;
        }

        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        RegInfo->setPhysRegUsed(DesignatedReg);
        ReusedOperands.markClobbered(DesignatedReg);
        TII->copyRegToReg(MBB, &MI, DesignatedReg, PhysReg, RC, RC);

        MachineInstr *CopyMI = prior(MII);
        UpdateKills(*CopyMI, RegKills, KillOps);

        // This invalidates DesignatedReg.
        Spills.ClobberPhysReg(DesignatedReg);

        Spills.addAvailable(ReuseSlot, &MI, DesignatedReg);
        unsigned RReg =
          SubIdx ? TRI->getSubReg(DesignatedReg, SubIdx) : DesignatedReg;
        MI.getOperand(i).setReg(RReg);
        DOUT << '\t' << *prior(MII);
        ++NumReused;
        continue;
      } // if (PhysReg)
      // Otherwise, reload it and remember that we have it.
      PhysReg = VRM.getPhys(VirtReg);
      assert(PhysReg && "Must map virtreg to physreg!");

      // Note that, if we reused a register for a previous operand, the
      // register we want to reload into might not actually be
      // available.  If this occurs, use the register indicated by the
      // reuser.
      if (ReusedOperands.hasReuses())
        PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);

      RegInfo->setPhysRegUsed(PhysReg);
      ReusedOperands.markClobbered(PhysReg);
      if (DoReMat) {
        ReMaterialize(MBB, MII, PhysReg, VirtReg, TRI, VRM);
      } else {
        const TargetRegisterClass* RC = RegInfo->getRegClass(VirtReg);
        TII->loadRegFromStackSlot(MBB, &MI, PhysReg, SSorRMId, RC);
        ++NumLoads;
      }
      // This invalidates PhysReg.
      Spills.ClobberPhysReg(PhysReg);

      // Any stores to this stack slot are not dead anymore.
      if (!DoReMat)
        MaybeDeadStores[SSorRMId] = NULL;
      Spills.addAvailable(SSorRMId, &MI, PhysReg);
      // Assumes this is the last use. IsKill will be unset if reg is reused
      // unless it's a two-address operand.
      if (TID.getOperandConstraint(i, TOI::TIED_TO) == -1)
        MI.getOperand(i).setIsKill();
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      MI.getOperand(i).setReg(RReg);
      UpdateKills(*prior(MII), RegKills, KillOps);
      DOUT << '\t' << *prior(MII);
    }
    // If we have folded references to memory operands, make sure we clear all
    // physical registers that may contain the value of the spilled virtual
    // register.
    SmallSet<int, 2> FoldedSS;
    for (tie(I, End) = VRM.getFoldedVirts(&MI); I != End; ++I) {
      unsigned VirtReg = I->second.first;
      VirtRegMap::ModRef MR = I->second.second;
      DOUT << "Folded vreg: " << VirtReg << "  MR: " << MR;

      int SS = VRM.getStackSlot(VirtReg);
      if (SS == VirtRegMap::NO_STACK_SLOT)
        continue;
      FoldedSS.insert(SS);
      DOUT << " - StackSlot: " << SS << "\n";
      // If this folded instruction is just a use, check to see if it's a
      // straight load from the virt reg slot.
      if ((MR & VirtRegMap::isRef) && !(MR & VirtRegMap::isMod)) {
        int FrameIdx;
        unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx);
        if (DestReg && FrameIdx == SS) {
          // If this spill slot is available, turn it into a copy (or nothing)
          // instead of leaving it as a load!
          if (unsigned InReg = Spills.getSpillSlotOrReMatPhysReg(SS)) {
            DOUT << "Promoted Load To Copy: " << MI;
            if (DestReg != InReg) {
              const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);
              TII->copyRegToReg(MBB, &MI, DestReg, InReg, RC, RC);
              // Revisit the copy so we make sure to notice the effects of the
              // operation on the destreg (either needing to RA it if it's
              // virtual or needing to clobber any values if it's physical).
              NextMII = &MI;
              --NextMII;  // backtrack to the copy.
              BackTracked = true;
            } else {
              DOUT << "Removing now-noop copy: " << MI;
              // Unset last kill since it's being reused.
              InvalidateKill(InReg, RegKills, KillOps);
            }

            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            goto ProcessNextInst;
          }
        } else {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          if (PhysReg &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, false, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            goto ProcessNextInst;
          }
        }
      }
      // If this reference is not a use, any previous store is now dead.
      // Otherwise, the store to this stack slot is not dead anymore.
      MachineInstr* DeadStore = MaybeDeadStores[SS];
      if (DeadStore) {
        bool isDead = !(MR & VirtRegMap::isRef);
        MachineInstr *NewStore = NULL;
        if (MR & VirtRegMap::isModRef) {
          unsigned PhysReg = Spills.getSpillSlotOrReMatPhysReg(SS);
          SmallVector<MachineInstr*, 4> NewMIs;
          // We can reuse this physreg as long as we are allowed to clobber
          // the value and there isn't an earlier def that has already clobbered
          // the physreg.
          if (PhysReg &&
              !TII->isStoreToStackSlot(&MI, SS) && // Not profitable!
              DeadStore->findRegisterUseOperandIdx(PhysReg, true) != -1 &&
              TII->unfoldMemoryOperand(MF, &MI, PhysReg, false, true, NewMIs)) {
            MBB.insert(MII, NewMIs[0]);
            NewStore = NewMIs[1];
            MBB.insert(MII, NewStore);
            VRM.RemoveMachineInstrFromMaps(&MI);
            MBB.erase(&MI);
            Erased = true;
            --NextMII;
            --NextMII;  // backtrack to the unfolded instruction.
            BackTracked = true;
            isDead = true;
          }
        }

        if (isDead) {  // Previous store is dead.
          // If we get here, the store is dead, nuke it now.
          DOUT << "Removed dead store:\t" << *DeadStore;
          InvalidateKills(*DeadStore, RegKills, KillOps);
          VRM.RemoveMachineInstrFromMaps(DeadStore);
          MBB.erase(DeadStore);
          if (!NewStore)
            ++NumDSE;
        }

        MaybeDeadStores[SS] = NULL;
        if (NewStore) {
          // Treat this store as a spill merged into a copy. That makes the
          // stack slot value available.
          VRM.virtFolded(VirtReg, NewStore, VirtRegMap::isMod);
          goto ProcessNextInst;
        }
      }
      // If the spill slot value is available, and this is a new definition of
      // the value, the value is not available anymore.
      if (MR & VirtRegMap::isMod) {
        // Notice that the value in this stack slot has been modified.
        Spills.ModifyStackSlotOrReMat(SS);

        // If this is *just* a mod of the value, check to see if this is just a
        // store to the spill slot (i.e. the spill got merged into the copy). If
        // so, realize that the vreg is available now, and add the store to the
        // MaybeDeadStore info.
        int StackSlot;
        if (!(MR & VirtRegMap::isRef)) {
          if (unsigned SrcReg = TII->isStoreToStackSlot(&MI, StackSlot)) {
            assert(TargetRegisterInfo::isPhysicalRegister(SrcReg) &&
                   "Src hasn't been allocated yet?");
            // Okay, this is certainly a store of SrcReg to [StackSlot].  Mark
            // this as a potentially dead store in case there is a subsequent
            // store into the stack slot without a read from it.
            MaybeDeadStores[StackSlot] = &MI;

            // If the stack slot value was previously available in some other
            // register, change it now.  Otherwise, make the register available,
            // in PhysReg.
            Spills.addAvailable(StackSlot, &MI, SrcReg, false/*don't clobber*/);
          }
        }
      }
    }
    // Process all of the spilled defs.
    for (unsigned i = 0, e = MI.getNumOperands(); i != e; ++i) {
      MachineOperand &MO = MI.getOperand(i);
      if (!(MO.isRegister() && MO.getReg() && MO.isDef()))
        continue;

      unsigned VirtReg = MO.getReg();
      if (!TargetRegisterInfo::isVirtualRegister(VirtReg)) {
        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        unsigned Src, Dst;
        if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
          ++NumDCE;
          DOUT << "Removing now-noop copy: " << MI;
          MBB.erase(&MI);
          Erased = true;
          VRM.RemoveMachineInstrFromMaps(&MI);
          Spills.disallowClobberPhysReg(VirtReg);
          goto ProcessNextInst;
        }

        // If it's not a no-op copy, it clobbers the value in the destreg.
        Spills.ClobberPhysReg(VirtReg);
        ReusedOperands.markClobbered(VirtReg);

        // Check to see if this instruction is a load from a stack slot into
        // a register.  If so, this provides the stack slot value in the reg.
        int FrameIdx;
        if (unsigned DestReg = TII->isLoadFromStackSlot(&MI, FrameIdx)) {
          assert(DestReg == VirtReg && "Unknown load situation!");

          // If it is a folded reference, then it's not safe to clobber.
          bool Folded = FoldedSS.count(FrameIdx);
          // Otherwise, if it wasn't available, remember that it is now!
          Spills.addAvailable(FrameIdx, &MI, DestReg, !Folded);
          goto ProcessNextInst;
        }

        continue;
      }
      unsigned SubIdx = MO.getSubReg();
      bool DoReMat = VRM.isReMaterialized(VirtReg);
      if (DoReMat)
        ReMatDefs.insert(&MI);

      // The only vregs left are stack slot definitions.
      int StackSlot = VRM.getStackSlot(VirtReg);
      const TargetRegisterClass *RC = RegInfo->getRegClass(VirtReg);

      // If this def is part of a two-address operand, make sure to execute
      // the store from the correct physical register.
      unsigned PhysReg;
      int TiedOp = MI.getDesc().findTiedToSrcOperand(i);
      if (TiedOp != -1) {
        PhysReg = MI.getOperand(TiedOp).getReg();
        if (SubIdx) {
          unsigned SuperReg = findSuperReg(RC, PhysReg, SubIdx, TRI);
          assert(SuperReg && TRI->getSubReg(SuperReg, SubIdx) == PhysReg &&
                 "Can't find corresponding super-register!");
          PhysReg = SuperReg;
        }
      } else {
        PhysReg = VRM.getPhys(VirtReg);
        if (ReusedOperands.isClobbered(PhysReg)) {
          // Another def has taken the assigned physreg. It must have been a
          // use&def which got it due to reuse. Undo the reuse!
          PhysReg = ReusedOperands.GetRegForReload(PhysReg, &MI,
                               Spills, MaybeDeadStores, RegKills, KillOps, VRM);
        }
      }

      RegInfo->setPhysRegUsed(PhysReg);
      unsigned RReg = SubIdx ? TRI->getSubReg(PhysReg, SubIdx) : PhysReg;
      ReusedOperands.markClobbered(RReg);
      MI.getOperand(i).setReg(RReg);

      if (!MO.isDead()) {
        MachineInstr *&LastStore = MaybeDeadStores[StackSlot];
        SpillRegToStackSlot(MBB, MII, -1, PhysReg, StackSlot, RC, true,
                          LastStore, Spills, ReMatDefs, RegKills, KillOps, VRM);
        NextMII = next(MII);

        // Check to see if this is a noop copy.  If so, eliminate the
        // instruction before considering the dest reg to be changed.
        {
          unsigned Src, Dst;
          if (TII->isMoveInstr(MI, Src, Dst) && Src == Dst) {
            ++NumDCE;
            DOUT << "Removing now-noop copy: " << MI;
            MBB.erase(&MI);
            Erased = true;
            VRM.RemoveMachineInstrFromMaps(&MI);
            UpdateKills(*LastStore, RegKills, KillOps);
            goto ProcessNextInst;
          }
        }
      }
    }
  ProcessNextInst:
    if (!Erased && !BackTracked) {
      for (MachineBasicBlock::iterator II = MI; II != NextMII; ++II)
        UpdateKills(*II, RegKills, KillOps);
    }
    MII = NextMII;
  }
}
llvm::Spiller* llvm::createSpiller() {
  switch (SpillerOpt) {
  default: assert(0 && "Unreachable!");
  case local:
    return new LocalSpiller();
  case simple:
    return new SimpleSpiller();
  }
}